language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | realpython__materials | web-scraping-with-scrapy-and-mongodb/books/tests/test_book.py | {
"start": 304,
"end": 2518
} | class ____(unittest.TestCase):
def setUp(self):
self.spider = BookSpider()
self.example_html = _get_sample_html_content()
self.response = HtmlResponse(
url="https://books.toscrape.com",
body=self.example_html,
encoding="utf-8",
)
def test_parse_scrapes_all_items(self):
"""Test if the spider scrapes all books and pagination links."""
# Collect the items produced by the generator in a list
# so that it's possible to iterate over it more than once.
results = list(self.spider.parse(self.response))
# There should be two book items and one pagination request
book_items = [item for item in results if isinstance(item, BooksItem)]
pagination_requests = [
item for item in results if isinstance(item, Request)
]
self.assertEqual(len(book_items), 2)
self.assertEqual(len(pagination_requests), 1)
def test_parse_scrapes_correct_book_information(self):
"""Test if the spider scrapes the correct information for each book."""
results_generator = self.spider.parse(self.response)
# Book 1
book_1 = next(results_generator)
self.assertEqual(
book_1["url"], "catalogue/a-light-in-the-attic_1000/index.html"
)
self.assertEqual(book_1["title"], "A Light in the Attic")
self.assertEqual(book_1["price"], "£51.77")
# Book 2
book_2 = next(results_generator)
self.assertEqual(
book_2["url"], "catalogue/tipping-the-velvet_999/index.html"
)
self.assertEqual(book_2["title"], "Tipping the Velvet")
self.assertEqual(book_2["price"], "£53.74")
def test_parse_creates_pagination_request(self):
"""Test if the spider creates a pagination request correctly."""
results = list(self.spider.parse(self.response))
next_page_request = results[-1]
self.assertIsInstance(next_page_request, Request)
self.assertEqual(
next_page_request.url,
"https://books.toscrape.com/catalogue/page-2.html",
)
if __name__ == "__main__":
unittest.main()
| BookSpiderTest |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/transfers/s3_to_redshift.py | {
"start": 1504,
"end": 12036
} | class ____(BaseOperator):
"""
Executes an COPY command to load files from s3 to Redshift.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3ToRedshiftOperator`
:param table: reference to a specific table in redshift database
:param s3_bucket: reference to a specific S3 bucket
:param s3_key: key prefix that selects single or multiple objects from S3
:param schema: reference to a specific schema in redshift database.
Do not provide when copying into a temporary table
:param redshift_conn_id: reference to a specific redshift database OR a redshift data-api connection
:param aws_conn_id: reference to a specific S3 connection
If the AWS connection contains 'aws_iam_role' in ``extras``
the operator will use AWS STS credentials with a token
https://docs.aws.amazon.com/redshift/latest/dg/copy-parameters-authorization.html#copy-credentials
:param verify: Whether to verify SSL certificates for S3 connection.
By default, SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be
verified.
- ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to uses.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:param column_list: list of column names to load source data fields into specific target columns
https://docs.aws.amazon.com/redshift/latest/dg/copy-parameters-column-mapping.html#copy-column-list
:param copy_options: reference to a list of COPY options
:param method: Action to be performed on execution. Available ``APPEND``, ``UPSERT`` and ``REPLACE``.
:param upsert_keys: List of fields to use as key on upsert action
:param redshift_data_api_kwargs: If using the Redshift Data API instead of the SQL-based connection,
dict of arguments for the hook's ``execute_query`` method.
Cannot include any of these kwargs: ``{'sql', 'parameters'}``
"""
template_fields: Sequence[str] = (
"s3_bucket",
"s3_key",
"schema",
"table",
"column_list",
"copy_options",
"redshift_conn_id",
"method",
"redshift_data_api_kwargs",
"aws_conn_id",
)
template_ext: Sequence[str] = ()
ui_color = "#99e699"
def __init__(
self,
*,
table: str,
s3_bucket: str,
s3_key: str,
schema: str | None = None,
redshift_conn_id: str = "redshift_default",
aws_conn_id: str | None | ArgNotSet = NOTSET,
verify: bool | str | None = None,
column_list: list[str] | None = None,
copy_options: list | None = None,
autocommit: bool = False,
method: str = "APPEND",
upsert_keys: list[str] | None = None,
redshift_data_api_kwargs: dict | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.schema = schema
self.table = table
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.redshift_conn_id = redshift_conn_id
self.aws_conn_id = aws_conn_id
self.verify = verify
self.column_list = column_list
self.copy_options = copy_options or []
self.autocommit = autocommit
self.method = method
self.upsert_keys = upsert_keys
self.redshift_data_api_kwargs = redshift_data_api_kwargs or {}
# In execute() we attempt to fetch this aws connection to check for extras. If the user didn't
# actually provide a connection note that, because we don't want to let the exception bubble up in
# that case (since we're silently injecting a connection on their behalf).
self._aws_conn_id: str | None
if is_arg_set(aws_conn_id):
self.conn_set = True
self._aws_conn_id = aws_conn_id
else:
self.conn_set = False
self._aws_conn_id = "aws_default"
if self.redshift_data_api_kwargs:
for arg in ["sql", "parameters"]:
if arg in self.redshift_data_api_kwargs:
raise AirflowException(f"Cannot include param '{arg}' in Redshift Data API kwargs")
@property
def use_redshift_data(self):
return bool(self.redshift_data_api_kwargs)
def _build_copy_query(
self, copy_destination: str, credentials_block: str, region_info: str, copy_options: str
) -> str:
column_names = "(" + ", ".join(self.column_list) + ")" if self.column_list else ""
return f"""
COPY {copy_destination} {column_names}
FROM 's3://{self.s3_bucket}/{self.s3_key}'
credentials
'{credentials_block}'
{region_info}
{copy_options};
"""
def execute(self, context: Context) -> None:
if self.method not in AVAILABLE_METHODS:
raise AirflowException(f"Method not found! Available methods: {AVAILABLE_METHODS}")
if self.use_redshift_data:
redshift_data_hook = RedshiftDataHook(aws_conn_id=self.redshift_conn_id)
else:
redshift_sql_hook = RedshiftSQLHook(redshift_conn_id=self.redshift_conn_id)
conn = (
S3Hook.get_connection(conn_id=self._aws_conn_id)
# Only fetch the connection if it was set by the user and it is not None
if self.conn_set and self._aws_conn_id
else None
)
region_info = ""
if conn and conn.extra_dejson.get("region", False):
region_info = f"region '{conn.extra_dejson['region']}'"
if conn and conn.extra_dejson.get("role_arn", False):
credentials_block = f"aws_iam_role={conn.extra_dejson['role_arn']}"
else:
s3_hook = S3Hook(aws_conn_id=self._aws_conn_id, verify=self.verify)
credentials = s3_hook.get_credentials()
credentials_block = build_credentials_block(credentials)
copy_options = "\n\t\t\t".join(self.copy_options)
destination = f"{self.schema}.{self.table}" if self.schema else self.table
copy_destination = f"#{self.table}" if self.method == "UPSERT" else destination
copy_statement = self._build_copy_query(
copy_destination, credentials_block, region_info, copy_options
)
sql: str | Iterable[str]
if self.method == "REPLACE":
sql = ["BEGIN;", f"DELETE FROM {destination};", copy_statement, "COMMIT"]
elif self.method == "UPSERT":
if self.use_redshift_data:
keys = self.upsert_keys or redshift_data_hook.get_table_primary_key(
table=self.table, schema=self.schema, **self.redshift_data_api_kwargs
)
else:
keys = self.upsert_keys or redshift_sql_hook.get_table_primary_key(self.table, self.schema)
if not keys:
raise AirflowException(
f"No primary key on {self.schema}.{self.table}. Please provide keys on 'upsert_keys'"
)
where_statement = " AND ".join([f"{self.table}.{k} = {copy_destination}.{k}" for k in keys])
sql = [
f"CREATE TABLE {copy_destination} (LIKE {destination} INCLUDING DEFAULTS);",
copy_statement,
"BEGIN;",
f"DELETE FROM {destination} USING {copy_destination} WHERE {where_statement};",
f"INSERT INTO {destination} SELECT * FROM {copy_destination};",
"COMMIT",
]
else:
sql = copy_statement
self.log.info("Executing COPY command...")
if self.use_redshift_data:
redshift_data_hook.execute_query(sql=sql, **self.redshift_data_api_kwargs)
else:
redshift_sql_hook.run(sql, autocommit=self.autocommit)
self.log.info("COPY command complete...")
def get_openlineage_facets_on_complete(self, task_instance):
"""Implement on_complete as we will query destination table."""
from airflow.providers.amazon.aws.utils.openlineage import (
get_facets_from_redshift_table,
)
from airflow.providers.common.compat.openlineage.facet import (
Dataset,
LifecycleStateChange,
LifecycleStateChangeDatasetFacet,
)
from airflow.providers.openlineage.extractors import OperatorLineage
if self.use_redshift_data:
redshift_data_hook = RedshiftDataHook(aws_conn_id=self.redshift_conn_id)
database = self.redshift_data_api_kwargs.get("database")
identifier = self.redshift_data_api_kwargs.get(
"cluster_identifier", self.redshift_data_api_kwargs.get("workgroup_name")
)
port = self.redshift_data_api_kwargs.get("port", "5439")
authority = f"{identifier}.{redshift_data_hook.region_name}:{port}"
output_dataset_facets = get_facets_from_redshift_table(
redshift_data_hook, self.table, self.redshift_data_api_kwargs, self.schema
)
else:
redshift_sql_hook = RedshiftSQLHook(redshift_conn_id=self.redshift_conn_id)
database = redshift_sql_hook.conn.schema
authority = redshift_sql_hook.get_openlineage_database_info(redshift_sql_hook.conn).authority
output_dataset_facets = get_facets_from_redshift_table(
redshift_sql_hook, self.table, {}, self.schema
)
if self.method == "REPLACE":
output_dataset_facets["lifecycleStateChange"] = LifecycleStateChangeDatasetFacet(
lifecycleStateChange=LifecycleStateChange.OVERWRITE
)
output_dataset = Dataset(
namespace=f"redshift://{authority}",
name=f"{database}.{self.schema}.{self.table}" if database else f"{self.schema}.{self.table}",
facets=output_dataset_facets,
)
input_dataset = Dataset(
namespace=f"s3://{self.s3_bucket}",
name=self.s3_key,
)
return OperatorLineage(inputs=[input_dataset], outputs=[output_dataset])
| S3ToRedshiftOperator |
python | pandas-dev__pandas | pandas/tests/indexes/categorical/test_append.py | {
"start": 102,
"end": 2245
} | class ____:
@pytest.fixture
def ci(self):
categories = list("cab")
return CategoricalIndex(list("aabbca"), categories=categories, ordered=False)
def test_append(self, ci):
# append cats with the same categories
result = ci[:3].append(ci[3:])
tm.assert_index_equal(result, ci, exact=True)
foos = [ci[:1], ci[1:3], ci[3:]]
result = foos[0].append(foos[1:])
tm.assert_index_equal(result, ci, exact=True)
def test_append_empty(self, ci):
# empty
result = ci.append([])
tm.assert_index_equal(result, ci, exact=True)
def test_append_mismatched_categories(self, ci):
# appending with different categories or reordered is not ok
msg = "all inputs must be Index"
with pytest.raises(TypeError, match=msg):
ci.append(ci.values.set_categories(list("abcd")))
with pytest.raises(TypeError, match=msg):
ci.append(ci.values.reorder_categories(list("abc")))
def test_append_category_objects(self, ci):
# GH#41626 pre-3.0 this used to cast the object-dtype index to
# ci.dtype
# with objects
result = ci.append(Index(["c", "a"]))
expected = Index(list("aabbcaca"))
tm.assert_index_equal(result, expected, exact=True)
def test_append_non_categories(self, ci):
# invalid objects -> cast to object via concat_compat
result = ci.append(Index(["a", "d"]))
expected = Index(["a", "a", "b", "b", "c", "a", "a", "d"])
tm.assert_index_equal(result, expected, exact=True)
def test_append_object(self, ci):
# GH#14298 - if base object is not categorical -> coerce to object
result = Index(["c", "a"]).append(ci)
expected = Index(list("caaabbca"))
tm.assert_index_equal(result, expected, exact=True)
def test_append_to_another(self):
# hits Index._concat
fst = Index(["a", "b"])
snd = CategoricalIndex(["d", "e"])
result = fst.append(snd)
expected = Index(["a", "b", "d", "e"])
tm.assert_index_equal(result, expected)
| TestAppend |
python | jupyterlab__jupyterlab | jupyterlab/labextensions.py | {
"start": 14847,
"end": 15822
} | class ____(BaseExtensionApp):
description = "Disable labextension(s) by name"
aliases = disable_aliases
level = Unicode("sys_prefix", help="Level at which to disable: sys_prefix, user, system").tag(
config=True
)
def run_task(self):
app_options = AppOptions(
app_dir=self.app_dir,
logger=self.log,
core_config=self.core_config,
labextensions_path=self.labextensions_path,
)
[
disable_extension(arg, app_options=app_options, level=self.level)
for arg in self.extra_args
]
self.log.info(
"Starting with JupyterLab 4.1 individual plugins can be re-enabled"
" in the user interface. While all plugins which were previously"
" disabled have been locked, you need to explicitly lock any newly"
" disabled plugins by using `jupyter labextension lock` command."
)
| DisableLabExtensionsApp |
python | joke2k__faker | tests/providers/test_date_time.py | {
"start": 42064,
"end": 42420
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("ro_RO")
Faker.seed(0)
def test_day(self):
day = self.fake.day_of_week()
assert day in RoRoProvider.DAY_NAMES.values()
def test_month(self):
month = self.fake.month_name()
assert month in RoRoProvider.MONTH_NAMES.values()
| TestRoRo |
python | cython__cython | Cython/Shadow.py | {
"start": 16982,
"end": 17865
} | class ____:
"""
cython.dataclasses just shadows the standard library modules of the same name
"""
def __init__(self, module):
self.__path__ = []
self.__file__ = None
self.__name__ = module
self.__package__ = module
def __getattr__(self, attr):
# we typically only expect this to be called once
from importlib import import_module
import sys
try:
mod = import_module(self.__name__)
except ImportError:
# but if they don't exist (Python is not sufficiently up-to-date) then
# you can't use them
raise AttributeError("%s: the standard library module %s is not available" %
(attr, self.__name__))
sys.modules['cython.%s' % self.__name__] = mod
return getattr(mod, attr)
| CythonDotImportedFromElsewhere |
python | pytorch__pytorch | test/onnx/model_defs/lstm_flattening_result.py | {
"start": 69,
"end": 274
} | class ____(nn.LSTM):
def forward(self, input, *fargs, **fkwargs):
output, (hidden, cell) = nn.LSTM.forward(self, input, *fargs, **fkwargs)
return output, hidden, cell
| LstmFlatteningResult |
python | huggingface__transformers | src/transformers/models/ernie/modular_ernie.py | {
"start": 4818,
"end": 4874
} | class ____(BertSelfAttention):
pass
| ErnieSelfAttention |
python | kubernetes-client__python | kubernetes/client/models/v1beta2_device_claim_configuration.py | {
"start": 383,
"end": 5016
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'opaque': 'V1beta2OpaqueDeviceConfiguration',
'requests': 'list[str]'
}
attribute_map = {
'opaque': 'opaque',
'requests': 'requests'
}
def __init__(self, opaque=None, requests=None, local_vars_configuration=None): # noqa: E501
"""V1beta2DeviceClaimConfiguration - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._opaque = None
self._requests = None
self.discriminator = None
if opaque is not None:
self.opaque = opaque
if requests is not None:
self.requests = requests
@property
def opaque(self):
"""Gets the opaque of this V1beta2DeviceClaimConfiguration. # noqa: E501
:return: The opaque of this V1beta2DeviceClaimConfiguration. # noqa: E501
:rtype: V1beta2OpaqueDeviceConfiguration
"""
return self._opaque
@opaque.setter
def opaque(self, opaque):
"""Sets the opaque of this V1beta2DeviceClaimConfiguration.
:param opaque: The opaque of this V1beta2DeviceClaimConfiguration. # noqa: E501
:type: V1beta2OpaqueDeviceConfiguration
"""
self._opaque = opaque
@property
def requests(self):
"""Gets the requests of this V1beta2DeviceClaimConfiguration. # noqa: E501
Requests lists the names of requests where the configuration applies. If empty, it applies to all requests. References to subrequests must include the name of the main request and may include the subrequest using the format <main request>[/<subrequest>]. If just the main request is given, the configuration applies to all subrequests. # noqa: E501
:return: The requests of this V1beta2DeviceClaimConfiguration. # noqa: E501
:rtype: list[str]
"""
return self._requests
@requests.setter
def requests(self, requests):
"""Sets the requests of this V1beta2DeviceClaimConfiguration.
Requests lists the names of requests where the configuration applies. If empty, it applies to all requests. References to subrequests must include the name of the main request and may include the subrequest using the format <main request>[/<subrequest>]. If just the main request is given, the configuration applies to all subrequests. # noqa: E501
:param requests: The requests of this V1beta2DeviceClaimConfiguration. # noqa: E501
:type: list[str]
"""
self._requests = requests
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2DeviceClaimConfiguration):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta2DeviceClaimConfiguration):
return True
return self.to_dict() != other.to_dict()
| V1beta2DeviceClaimConfiguration |
python | huggingface__transformers | src/transformers/models/whisper/generation_whisper.py | {
"start": 9291,
"end": 110261
} | class ____(GenerationMixin):
def _extract_token_timestamps(
self, generate_outputs, alignment_heads, time_precision=0.02, num_frames=None, num_input_ids=None
):
"""
Calculates token-level timestamps using the encoder-decoder cross-attentions and dynamic time-warping (DTW) to
map each output token to a position in the input audio. If `num_frames` is specified, the encoder-decoder
cross-attentions will be cropped before applying DTW.
Returns:
tensor containing the timestamps in seconds for each predicted token
"""
# Create a list with `decoder_layers` elements, each a tensor of shape
# (batch size * num beams, attention_heads, output length, input length).
cross_attentions = []
for i in range(self.config.decoder_layers):
cross_attentions.append(torch.cat([x[i] for x in generate_outputs.cross_attentions], dim=2))
# Select specific cross-attention layers and heads. This is a tensor
# of shape (batch size * num beams, num selected heads, output length, input length).
weights = torch.stack([cross_attentions[l][:, h] for l, h in alignment_heads])
weights = weights.permute([1, 0, 2, 3])
weight_length = None
if "beam_indices" in generate_outputs:
# If beam search was used, the sequence length of the outputs may not be the real sequence length:
# beam search may end up returning a sequence that finished a few steps earlier while decoding.
# In that case, the `cross_attentions` weights are too long and we have to make sure that they have
# the right `output_length`
# get the real sequence length of the longest sequence, crop the beam_indices to the real length
weight_length = (generate_outputs.beam_indices != -1).sum(-1).max()
beam_indices = generate_outputs.beam_indices[:, :weight_length]
# The first forward pass (prefill) may have processed more than one token and, therefore, contain
# cross-attention weights for several tokens.
# Let's unroll the first `beam_indices` accordingly, so we can use it to gather the weights.
if num_input_ids is not None and num_input_ids > 1:
# `-1`: `beam_indices` can be used as-is to gather the weights when `num_input_ids` is 1
weight_length += num_input_ids - 1
beam_indices_first_step_unrolled = (
torch.ones(beam_indices.shape[0], num_input_ids - 1, device=beam_indices.device, dtype=torch.long)
* (beam_indices[:, 0:1])
)
unrolled_beam_indices = torch.cat([beam_indices_first_step_unrolled, beam_indices], dim=-1)
else:
unrolled_beam_indices = beam_indices
# If beam index is still -1, it means that the associated token id is EOS
# We need to replace the index with 0 since index_select gives an error if any of the indexes is -1.
unrolled_beam_indices = unrolled_beam_indices.masked_fill(unrolled_beam_indices == -1, 0)
# Select the cross attention from the right beam for each output sequence, up to the real sequence
# length (`weight_length`)
weights = torch.stack(
[
torch.index_select(weights[:, :, i, :], dim=0, index=unrolled_beam_indices[:, i])
for i in range(unrolled_beam_indices.shape[1])
],
dim=2,
)
# make sure timestamps are as long as weights
input_length = weight_length or cross_attentions[0].shape[2]
batch_size = generate_outputs.sequences.shape[0]
timestamps = torch.zeros(
(batch_size, input_length + 1), dtype=torch.float32, device=generate_outputs.sequences.device
)
if num_frames is not None:
# two cases:
# 1. num_frames is the same for each sample -> compute the DTW matrix for each sample in parallel
# 2. num_frames is different, compute the DTW matrix for each sample sequentially
# we're using np.unique because num_frames can be int/list/tuple
if isinstance(num_frames, int):
weights = weights[..., : num_frames // 2]
elif isinstance(num_frames, (list, tuple, np.ndarray)) and len(np.unique(num_frames)) == 1:
weights = weights[..., : num_frames[0] // 2]
elif isinstance(num_frames, (torch.Tensor)) and len(torch.unique(num_frames)) == 1:
weights = weights[..., : num_frames[0] // 2]
else:
# num_frames is of shape (batch_size,) whereas batch_size is truly batch_size*num_return_sequences
repeat_time = batch_size if isinstance(num_frames, int) else batch_size // len(num_frames)
num_frames = num_frames.cpu() if isinstance(num_frames, (torch.Tensor)) else num_frames
num_frames = np.repeat(num_frames, repeat_time)
# let's ignore decoder_input_ids that can negatively impact the DTW while we know they have timestamps 0.0s
# (they are not taken into account for the DTW in OAI implementation)
if num_input_ids is not None:
weights = weights[:, :, num_input_ids:, :]
# Since we ignore `decoder_input_ids` in the DTW and in the case where we generated only one token (for which we don't have cross attentions, see below comments),
# the DTW sequence length is 0 and we should return only 0.0s for the token timestamps
if weights.shape[2] == 0:
return timestamps
if num_frames is None or isinstance(num_frames, int):
# Normalize and smoothen the weights.
std = torch.std(weights, dim=-2, keepdim=True, unbiased=False)
mean = torch.mean(weights, dim=-2, keepdim=True)
weights = (weights - mean) / std
weights = _median_filter(weights, self.config.median_filter_width)
# Average the different cross-attention heads.
weights = weights.mean(dim=1)
# Perform dynamic time warping on each element of the batch.
for batch_idx in range(batch_size):
if num_frames is not None and isinstance(num_frames, (tuple, list, np.ndarray, torch.Tensor)):
matrix = weights[batch_idx, ..., : num_frames[batch_idx] // 2]
# Normalize and smoothen the weights.
std = torch.std(matrix, dim=-2, keepdim=True, unbiased=False)
mean = torch.mean(matrix, dim=-2, keepdim=True)
matrix = (matrix - mean) / std
matrix = _median_filter(matrix, self.config.median_filter_width)
# Average the different cross-attention heads.
matrix = matrix.mean(dim=0)
else:
matrix = weights[batch_idx]
text_indices, time_indices = _dynamic_time_warping(-matrix.cpu().double().numpy())
jumps = np.pad(np.diff(text_indices), (1, 0), constant_values=1).astype(bool)
jump_times = time_indices[jumps] * time_precision
# each predicted token has a corresponding timestamp, expect the eos token (or last predicted token) for which we don't retrieve cross attentions
# (indeed contrary to OAI that re-run a full forward to retrieve cross attentions for each token and therefore also the last one predicted, we retrieve
# cross attentions directly from the auto-regressive generation, so we don't have cross attentiosn for the token at the end of the sequence. Nevertheless,
# that is not important since we expect this last token to be the eos token)
# 1. for decoder_input_ids, we set the timestamps to 0.0
# 2. for the eos token (or last predicted token), we simply duplicate the timestamp of the last non-eos token
timestamps[batch_idx] = torch.cat(
[torch.zeros(num_input_ids), torch.tensor(jump_times), torch.tensor([jump_times[-1]])]
)
return timestamps
def generate(
    self,
    input_features: Optional[torch.Tensor] = None,
    generation_config: Optional[GenerationConfig] = None,
    logits_processor: Optional[LogitsProcessorList] = None,
    stopping_criteria: Optional[StoppingCriteriaList] = None,
    prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], list[int]]] = None,
    synced_gpus: bool = False,
    return_timestamps: Optional[bool] = None,
    task: Optional[str] = None,
    language: Optional[Union[str, list[str]]] = None,
    is_multilingual: Optional[bool] = None,
    prompt_ids: Optional[torch.Tensor] = None,
    prompt_condition_type: Optional[str] = None,  # first-segment, all-segments
    condition_on_prev_tokens: Optional[bool] = None,
    temperature: Optional[Union[float, tuple[float, ...]]] = None,
    compression_ratio_threshold: Optional[float] = None,
    logprob_threshold: Optional[float] = None,
    no_speech_threshold: Optional[float] = None,
    num_segment_frames: Optional[int] = None,
    attention_mask: Optional[torch.Tensor] = None,
    time_precision: float = 0.02,
    time_precision_features: float = 0.01,
    return_token_timestamps: Optional[bool] = None,
    return_segments: bool = False,
    return_dict_in_generate: Optional[bool] = None,
    force_unique_generate_call: Optional[bool] = None,
    monitor_progress: Optional[Callable[[torch.Tensor], None]] = None,
    **kwargs,
):
    """
    Transcribes or translates log-mel input features to a sequence of auto-regressively generated token ids.

    <Tip warning={true}>

    Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
    model's default generation configuration. You can override any `generation_config` by passing the corresponding
    parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.

    For an overview of generation strategies and code examples, check out the [following
    guide](../generation_strategies).

    </Tip>

    Parameters:
        input_features (`torch.Tensor` of shape `(batch_size, feature_size, sequence_length)`, *optional*):
            Float values of log-mel features extracted from the raw speech waveform. The raw speech waveform can be obtained by
            loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`,
            *e.g.* via the torchcodec library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
            To prepare the array into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the mel
            features, padding and conversion into a tensor of type `torch.FloatTensor`.
            See [`~WhisperFeatureExtractor.__call__`] for details.
        generation_config ([`~generation.GenerationConfig`], *optional*):
            The generation configuration to be used as base parametrization for the generation call. `**kwargs`
            passed to generate matching the attributes of `generation_config` will override them. If
            `generation_config` is not provided, the default will be used, which had the following loading
            priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
            configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
            default values, whose documentation should be checked to parameterize generation.
        logits_processor (`LogitsProcessorList`, *optional*):
            Custom logits processors that complement the default logits processors built from arguments and
            generation config. If a logit processor is passed that is already created with the arguments or a
            generation config an error is thrown. This feature is intended for advanced users.
        stopping_criteria (`StoppingCriteriaList`, *optional*):
            Custom stopping criteria that complement the default stopping criteria built from arguments and a
            generation config. If a stopping criteria is passed that is already created with the arguments or a
            generation config an error is thrown. This feature is intended for advanced users.
        prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], list[int]]`, *optional*):
            If provided, this function constraints the beam search to allowed tokens only at each step. If not
            provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and
            `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned
            on the batch ID `batch_id` and the previously generated tokens `inputs_ids`. This argument is useful
            for constrained generation conditioned on the prefix, as described in [Autoregressive Entity
            Retrieval](https://huggingface.co/papers/2010.00904).
        synced_gpus (`bool`, *optional*, defaults to `False`):
            Whether to continue running the while loop until max_length (needed to avoid deadlocking with
            `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
        return_timestamps (`bool`, *optional*):
            Whether to return the timestamps with the text. This enables the `WhisperTimestampsLogitsProcessor`.
            For audios longer than 30 seconds, it is necessary to set `return_timestamps=True`.
        task (`str`, *optional*):
            Task to use for generation, either "translate" or "transcribe".
        language (`str` or list of `str`, *optional*):
            Language token to use for generation, can be either in the form of `<|en|>`, `en` or `english`. For
            batched generation, a list of language tokens can be passed. You can find all the possible language
            tokens in the `model.generation_config.lang_to_id` dictionary.
        is_multilingual (`bool`, *optional*):
            Whether or not the model is multilingual.
        prompt_ids (`torch.Tensor`, *optional*):
            Rank-1 tensor of token IDs created by passing text to [`~WhisperProcessor.get_prompt_ids`] that is
            provided as a prompt to each chunk. This can be used to provide or "prompt-engineer" a context for
            transcription, e.g. custom vocabularies or proper nouns to make it more likely to predict those words
            correctly. It cannot be used in conjunction with `decoder_start_token_id` as it overwrites this value.
        prompt_condition_type (`str`, *optional*):
            Only relevant for long-form transcription. Condition type of `prompt_ids`. 'first-segment' means only the first segment is conditioned on `prompt_ids`. 'all-segments' means each segment is conditioned on `prompt_ids`. Make sure to enable `condition_on_prev_tokens` for 'all-segments'.
            Defaults to 'first-segment'. For short-term transcription only 'first-segment' is possible.
        condition_on_prev_tokens (`bool`, *optional*):
            Only relevant for long-form transcription. Whether to condition each segment on the previous segment.
            As shown in the [the Whisper paper](https://cdn.openai.com/papers/whisper.pdf), this can help to improve
            performance.
        temperature (`float` or list of `float`, *optional*):
            The temperature to be used for generation. Passing a single `float` value and `do_sample=True` activates
            generation using sampling. For long-form transcription, temperature fallback can be activated by passing
            a list of float values such as (0.0, 0.2, 0.4, 0.6, 0.8, 1.0). As shown in the [the Whisper paper](https://cdn.openai.com/papers/whisper.pdf), this can help to improve
            performance.
        compression_ratio_threshold (`float`, *optional*):
            Only relevant for long-form transcription. If defined, the zlib compression rate of each segment will be computed. If the compression rate of
            a segment is higher than `compression_ratio_threshold`, temperature fallback is activated: the generated segment is discarded and the generation is
            repeated using a higher temperature. The intuition behind this feature is that segments with very high compression rates
            suffer from a lot of repetition. The unwanted repetition can be reduced by injecting more randomness by increasing the temperature. If `compression_ratio_threshold` is defined
            make sure that `temperature` is a list of values. A common value for `compression_ratio_threshold` is 1.35.
            As shown in the [the Whisper paper](https://cdn.openai.com/papers/whisper.pdf), this can help to improve
            performance.
        logprob_threshold (`float`, *optional*):
            Only relevant for long-form transcription. If defined, the average log-probability of each segment will be computed. If the log-probability of
            a given segment is lower than `logprob_threshold`, temperature fallback is activated: the generated segment is discarded and the generation is
            repeated using a higher temperature. The intuition behind this feature is that segments of low log-probability
            can be improved by injecting more randomness by increasing the temperature. If `logprob_threshold` is defined
            make sure that `temperature` is a list of values. A common value for `logprob_threshold` is -1.0.
            As shown in the [the Whisper paper](https://cdn.openai.com/papers/whisper.pdf), this can help to improve
            performance.
        no_speech_threshold (`float`, *optional*):
            Only relevant for long-form transcription. If defined, the "no-speech" token combined with the `logprob_threshold`
            is used to determine whether a segment contains only silence. In this case, the transcription for this segment
            is skipped.
            As shown in the [the Whisper paper](https://cdn.openai.com/papers/whisper.pdf), this can help to improve
            performance.
        num_segment_frames (`int`, *optional*):
            The number of frames a single segment is made of. If not defined, `num_segment_frames` defaults to the model's stride
            times the maximum input length.
        attention_mask (`torch.Tensor`, *optional*):
            `attention_mask` needs to be passed when doing long-form transcription using a batch size > 1.
        time_precision (`int`, *optional*, defaults to 0.02):
            The duration of output token in seconds. *E.g.* 0.02 means that a generated token on average accounts
            for 20 ms.
        time_precision_features (`int`, *optional*, defaults to 0.01):
            The duration represented by a feature frame in seconds.
        return_token_timestamps (`bool`, *optional*):
            Whether to return token-level timestamps with the text. This can be used with or without the
            `return_timestamps` option. To get word-level timestamps, use the tokenizer to group the tokens into
            words.
        return_segments (`bool`, *optional*, defaults to `False`):
            Whether to additionally return a list of all segments. Note that this option can only be enabled
            when doing long-form transcription.
        return_dict_in_generate (`bool`, *optional*, defaults to `False`):
            Whether or not to return a [`~utils.ModelOutput`] instead of just returning the generated tokens.
            Note that when doing long-form transcription, `return_dict_in_generate` can only be enabled when
            `return_segments` is set True. In this case the generation outputs of each segment is added to each
            segment.
        force_unique_generate_call (`bool`, *optional*):
            Whether to force a unique call to the underlying GenerationMixin's [`~generation.GenerationMixin.generate`] method. This is useful for assisted decoding and testing purposes to ensure
            that only one call to [`~generation.GenerationMixin.generate`] is made and therefore decoder input token ids and eos token ids are returned.
        monitor_progress (`Callable[[torch.Tensor], None]`, *optional*):
            If provided, this function can be called to report the progress of the audio transcription. The function
            takes a tensor argument `p` of shape `(n, 2)`, where `n` is the batch size. `p[i, 0]` contains the
            index of the audio frame that is currently being transcribed for batch item `i`. `p[i, 1]` contains
            the total number of frames for batch item `i`. No return value is expected.
        kwargs (`dict[str, Any]`, *optional*):
            Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
            forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
            specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.

    Return:
        [`~utils.ModelOutput`] or `dict[str, Any]` or `torch.LongTensor`:

        One of the following:
            - [`~utils.ModelOutput`] when `return_dict_in_generate=True` and (`return_timestamps=False` or `force_unique_generate_call=True`), including the decoder input ids and end of sequence id.
            - `dict[str, Any]` when (`return_dict_in_generate=True` and `return_timestamps=True`) or `return_segments=True` or `return_token_timestamps=True`.
            - `torch.LongTensor` in all other cases, excluding the decoder input ids and end of sequence id.

        The possible [`~utils.ModelOutput`] types are:
            - [`~generation.GenerateEncoderDecoderOutput`]
            - [`~generation.GenerateBeamEncoderDecoderOutput`]

        `segments` is a list of lists (one list per batch element) of `segment`.
        A `segment` is a dictionary with keys `start`, `end`, `tokens`, `idxs`, and `result`.
            - `start`: the start timestamp of the segment.
            - `end`: the end timestamp of the segment.
            - `tokens`: the tokens of the segment, excluding the decoder input ids and end of sequence id.
            - `idxs`: the start (included) and end (excluded) indices of the `tokens` of the segment in the underlying call to GenerationMixin's [`~generation.GenerationMixin.generate`] (present in `result`).
            - `result`: the result of the underlying call to GenerationMixin's [`~generation.GenerationMixin.generate`].

        When `return_timestamps=True`, `return_dict_in_generate=True` applies to each call of the underlying GenerationMixin's [`~generation.GenerationMixin.generate`], with outputs stored in `result` of each `segment`.

    Example:

    - *Longform transcription*: To transcribe or translate audios longer than 30 seconds, process the audio files without truncation and pass all mel features at once to generate. It is necessary to set `return_timestamps=True`.
    Indeed, long-form transcription uses a sequential algorithm based on timestamps predictions, with heuristics like compression ratio threshold, log probability threshold and temperature fallback. This algorithm is described in the [the Whisper original paper](https://cdn.openai.com/papers/whisper.pdf), section *3.8. Long-form Transcription*.

    ```python
    >>> import torch
    >>> from transformers import AutoProcessor, WhisperForConditionalGeneration
    >>> from datasets import load_dataset, Audio

    >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en")
    >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
    >>> model.cuda()  # doctest: +IGNORE_RESULT

    >>> # load audios > 30 seconds
    >>> ds = load_dataset("distil-whisper/meanwhile", "default")["test"]
    >>> # resample to 16kHz
    >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000))
    >>> # take first 8 audios and retrieve array
    >>> audio = ds[:8]["audio"]
    >>> audio = [x["array"] for x in audio]

    >>> # make sure to NOT truncate the input audio, to return the `attention_mask` and to pad to the longest audio
    >>> inputs = processor(audio, return_tensors="pt", truncation=False, padding="longest", return_attention_mask=True, sampling_rate=16_000)
    >>> inputs = inputs.to("cuda", torch.float32)

    >>> # transcribe audio to ids
    >>> generated_ids = model.generate(**inputs, return_timestamps=True)

    >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)
    >>> transcription[0]
    " Folks, if you watch the show, you know, I spent a lot of time right over there. Patiently and astutely scrutinizing the boxwood and mahogany chest set of the day's biggest stories developing the central headline pawns, definitely maneuvering an oso topical night to F6, fainting a classic Sicilian, nade door variation on the news, all the while seeing eight moves deep and patiently marshalling the latest press releases into a fisher's shows in Lip Nitsky attack that culminates in the elegant lethal slow-played, all-passant checkmate that is my nightly monologue. But sometimes, sometimes, folks, I. CHEERING AND APPLAUSE Sometimes I startle away, cubside down in the monkey bars of a condemned playground on a super fun site. Get all hept up on goofballs. Rummage that were discarded tag bag of defective toys. Yank out a fist bowl of disembodied doll limbs, toss them on a stained kid's place mat from a defunct dennies. set up a table inside a rusty cargo container down by the Wharf and challenged toothless drifters to the godless bughouse blitz of tournament that is my segment. Meanwhile."
    ```

    The `monitor_progress` callback can be used to monitor the progress of the transcription:

    ```python
    >>> from tqdm import tqdm

    >>> # prepare inputs like above

    >>> # define a callback to monitor the progress of the transcription.
    >>> with tqdm(desc="Progress") as pbar:
    >>>     def monitor_progress(p_batch):
    >>>         i = torch.argmax(p_batch[:, 1])
    >>>         p = p_batch[i].detach().cpu()
    >>>         pbar.total = int(p[1])
    >>>         pbar.n = int(p[0])
    >>>         pbar.update()

    >>>     # transcribe audio to ids
    >>>     generated_ids = model.generate(**inputs, return_timestamps=True, monitor_progress=monitor_progress)

    >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)
    >>> transcription[0]
    Progress:  95%|█████████████████████████████████████████████████████████████████████████████████████████████████▎    | 8497/8901 [00:04<00:00, 2052.79it/s]
    " Folks, if you watch the show, you know, I spent a lot of time right over there. Patiently and astutely scrutinizing the boxwood and mahogany chest set of the day's biggest stories developing the central headline pawns, definitely maneuvering an oso topical night to F6, fainting a classic Sicilian, nade door variation on the news, all the while seeing eight moves deep and patiently marshalling the latest press releases into a fisher's shows in Lip Nitsky attack that culminates in the elegant lethal slow-played, all-passant checkmate that is my nightly monologue. But sometimes, sometimes, folks, I. CHEERING AND APPLAUSE Sometimes I startle away, cubside down in the monkey bars of a condemned playground on a super fun site. Get all hept up on goofballs. Rummage that were discarded tag bag of defective toys. Yank out a fist bowl of disembodied doll limbs, toss them on a stained kid's place mat from a defunct dennies. set up a table inside a rusty cargo container down by the Wharf and challenged toothless drifters to the godless bughouse blitz of tournament that is my segment. Meanwhile."
    ```

    - *Shortform transcription*: If passed mel input features are <= 30 seconds, there are two possibilities:
        - `return_timestamps=False`: the whole audio will be transcribed with a single call to GenerationMixin's [`~generation.GenerationMixin.generate`].
        - `return_timestamps=True`: the audio will be transcribed using the same logic as long-form transcription.

    ```python
    >>> import torch
    >>> from transformers import AutoProcessor, WhisperForConditionalGeneration
    >>> from datasets import load_dataset

    >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en")
    >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")

    >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")

    >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt")
    >>> input_features = inputs.input_features

    >>> generated_ids = model.generate(inputs=input_features)

    >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    >>> transcription
    ' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.'
    ```
    """
    # 0. deprecate old inputs
    if "inputs" in kwargs:
        input_features = kwargs.pop("inputs")
        warnings.warn(
            "The input name `inputs` is deprecated. Please make sure to use `input_features` instead.",
            FutureWarning,
        )

    # 1. prepare generation config
    generation_config, kwargs = self._prepare_generation_config(generation_config, **kwargs)

    # 2. set global generate variables
    # `input_stride` is the downsampling factor of the two encoder convolutions; a "segment" is
    # the model's maximum encoder input (30s of audio for standard Whisper checkpoints).
    # NOTE(review): this unconditionally overwrites the user-passed `num_segment_frames` argument —
    # confirm whether the parameter is intentionally ignored here.
    input_stride = self.model.encoder.conv1.stride[0] * self.model.encoder.conv2.stride[0]
    num_segment_frames = input_stride * self.config.max_source_positions
    batch_size, total_input_frames = self._retrieve_total_input_frames(
        input_features=input_features, input_stride=input_stride, kwargs=kwargs
    )
    # short-form: the whole input fits in a single segment, so at most one decoding pass is needed
    is_shortform = total_input_frames <= num_segment_frames

    # 3. Make sure generation config is correctly set
    # Make sure the generation config is correctly set depending on whether timestamps are to be returned or not
    return_dict_in_generate = self._set_return_outputs(
        return_dict_in_generate=return_dict_in_generate,
        return_token_timestamps=return_token_timestamps,
        logprob_threshold=logprob_threshold,
        generation_config=generation_config,
    )
    timestamp_begin = self._set_return_timestamps(
        return_timestamps=return_timestamps, is_shortform=is_shortform, generation_config=generation_config
    )
    self._set_language_and_task(
        language=language, task=task, is_multilingual=is_multilingual, generation_config=generation_config
    )
    self._set_num_frames(
        return_token_timestamps=return_token_timestamps,
        generation_config=generation_config,
        attention_mask=attention_mask,
        kwargs=kwargs,
    )
    self._set_thresholds_and_condition(
        generation_config=generation_config,
        logprob_threshold=logprob_threshold,
        compression_ratio_threshold=compression_ratio_threshold,
        no_speech_threshold=no_speech_threshold,
        condition_on_prev_tokens=condition_on_prev_tokens,
    )
    self._set_prompt_condition_type(
        generation_config=generation_config,
        prompt_condition_type=prompt_condition_type,
    )

    # pass self.config for backward compatibility
    init_tokens = self._retrieve_init_tokens(
        input_features,
        batch_size=batch_size,
        generation_config=generation_config,
        config=self.config,
        num_segment_frames=num_segment_frames,
        kwargs=kwargs,
    )
    # passing `decoder_input_ids` is deprecated - the only exception is for assisted generation
    # where the input ids are handled explicitly by the generate method
    self._check_decoder_input_ids(kwargs=kwargs)

    # `output_attentions` is deprecated - we force eager attention if this feature is
    # indirectly requested, e.g. through return_token_timestamps
    # (token-level timestamps are derived from cross-attention weights, which only the
    # eager attention implementation returns)
    if return_token_timestamps:
        self.model.config._attn_implementation = "eager"

    # 3bis. Retrieve logits processors
    device = kwargs["encoder_outputs"][0].device if "encoder_outputs" in kwargs else input_features.device
    begin_index = init_tokens.shape[1]
    num_beams = kwargs.get(
        "num_beams",
        generation_config.num_beams
        if hasattr(generation_config, "num_beams") and generation_config.num_beams is not None
        else 1,
    )
    if "assistant_model" in kwargs:
        # speculative decoding: the model should be able to return eos token
        generation_config.begin_suppress_tokens = None

    logits_processor = self._retrieve_logit_processors(
        generation_config=generation_config,
        logits_processor=logits_processor,
        begin_index=begin_index,  # begin index is index of first generated decoder token
        num_beams=num_beams,
        device=device,
    )

    # 4 Set and retrieve global generation variables
    self._set_condition_on_prev_tokens(
        condition_on_prev_tokens=condition_on_prev_tokens, generation_config=generation_config
    )

    # normalize `temperature` into a list so temperature fallback can iterate over candidates
    temperatures = [temperature] if not isinstance(temperature, (list, tuple)) else temperature
    temperature = temperatures[0]

    # `seek` tracks, per batch item, the index of the next feature frame to transcribe;
    # `max_frames` is the per-item total number of frames
    max_frames, seek = self._retrieve_max_frames_and_seek(
        batch_size=batch_size,
        attention_mask=attention_mask,
        total_input_frames=total_input_frames,
        is_shortform=is_shortform,
    )

    # 5 Prepare running variables, list for generation
    num_return_sequences = generation_config.num_return_sequences
    (
        batch_idx_map,
        cur_bsz,
        input_features,
        seek,
        max_frames,
        init_tokens,
        do_condition_on_prev_tokens,
    ) = self._expand_variables_for_generation(
        input_features=input_features,
        seek=seek,
        max_frames=max_frames,
        init_tokens=init_tokens,
        batch_size=batch_size,
        condition_on_prev_tokens=condition_on_prev_tokens,
        generation_config=generation_config,
    )

    current_segments = self._prepare_segments(
        prompt_ids=prompt_ids,
        batch_size=cur_bsz,
        generation_config=generation_config,
    )
    # 5bis speculative decoding: ensure the assistant model does only one call to generate and therefore returns decoder input token ids and eos token id
    # we set a flag in the generation config to force the model to make only one call to generate and return the decoder input token ids and eos token id
    if "assistant_model" in kwargs:
        assistant_model = kwargs["assistant_model"]
        assistant_model.generation_config.force_unique_generate_call = True

    # resolution order: explicit argument > generation_config > model's generation_config > False
    if force_unique_generate_call is None:
        if hasattr(generation_config, "force_unique_generate_call"):
            force_unique_generate_call = generation_config.force_unique_generate_call
        elif hasattr(self.generation_config, "force_unique_generate_call"):
            force_unique_generate_call = self.generation_config.force_unique_generate_call
        else:
            force_unique_generate_call = False

    # 6 Transcribe audio until we reach the end of all input audios
    while (seek < max_frames).any():
        # report per-item progress as (current frame, total frames) pairs
        if monitor_progress is not None:
            monitor_progress(torch.stack((seek, max_frames), dim=1))

        # 6.1 NOTE: When in longform transcription mode and batch size > 1 we need to dynamically reduce the batch size during the loop
        # in case one audio finished earlier than another one. Thus, we need to keep a table of "previous-index-2-current-index" in order
        # to know which original audio is being decoded
        # Set updated index map, duration of previously decoded chunks and number of max frames of current decoding chunk
        input_features, cur_bsz, batch_idx_map = self._maybe_reduce_batch(
            input_features=input_features,
            seek=seek,
            max_frames=max_frames,
            cur_bsz=cur_bsz,
            batch_idx_map=batch_idx_map,
        )
        # float64 is not supported on mps, hence the float32 fallback there
        time_offset = (
            seek.to(torch.float32 if device.type == "mps" else torch.float64) * time_precision / input_stride
        )
        seek_num_frames = (max_frames - seek).clamp(max=num_segment_frames)

        # 6.2 cut out next 30s segment from input features
        segment_input = self._get_input_segment(
            input_features=input_features,
            seek=seek,
            seek_num_frames=seek_num_frames,
            num_segment_frames=num_segment_frames,
            cur_bsz=cur_bsz,
            batch_idx_map=batch_idx_map,
        )

        # 6.3 prepare decoder input ids
        suppress_tokens = _get_attr_from_logit_processors(
            logits_processor, SuppressTokensLogitsProcessor, "suppress_tokens"
        )

        decoder_input_ids, kwargs = self._prepare_decoder_input_ids(
            cur_bsz=cur_bsz,
            init_tokens=init_tokens,
            current_segments=current_segments,
            batch_idx_map=batch_idx_map,
            do_condition_on_prev_tokens=do_condition_on_prev_tokens,
            prompt_ids=prompt_ids,
            generation_config=generation_config,
            config=self.config,
            device=init_tokens.device,
            suppress_tokens=suppress_tokens,
            timestamp_begin=timestamp_begin,
            kwargs=kwargs,
        )

        # 6.4 set max new tokens or max length
        self._set_max_new_tokens_and_length(
            config=self.config,
            decoder_input_ids=decoder_input_ids,
            generation_config=generation_config,
        )

        # 6.5 Set current `begin_index` for all logit processors
        # (the prompt length changes per chunk when conditioning on previous tokens)
        if logits_processor is not None:
            for proc in logits_processor:
                if hasattr(proc, "set_begin_index"):
                    proc.set_begin_index(decoder_input_ids.shape[-1])

        # 6.6 Run generate with fallback
        (
            seek_sequences,
            seek_outputs,
            should_skip,
            do_condition_on_prev_tokens,
            model_output_type,
        ) = self.generate_with_fallback(
            segment_input=segment_input,
            decoder_input_ids=decoder_input_ids,
            cur_bsz=cur_bsz,
            seek=seek,
            batch_idx_map=batch_idx_map,
            temperatures=temperatures,
            generation_config=generation_config,
            logits_processor=logits_processor,
            stopping_criteria=stopping_criteria,
            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
            synced_gpus=synced_gpus,
            return_token_timestamps=return_token_timestamps,
            do_condition_on_prev_tokens=do_condition_on_prev_tokens,
            is_shortform=is_shortform,
            batch_size=batch_size,
            attention_mask=attention_mask,
            kwargs=kwargs,
        )

        # 6.7 In every generated sequence, split by timestamp tokens and extract segments
        for i, seek_sequence in enumerate(seek_sequences):
            prev_i = batch_idx_map[i]

            # a skipped segment (e.g. detected silence) simply advances the seek pointer
            if should_skip[i]:
                seek[prev_i] += seek_num_frames[prev_i]
                continue

            segments, segment_offset = self._retrieve_segment(
                seek_sequence=seek_sequence,
                seek_outputs=seek_outputs,
                time_offset=time_offset,
                timestamp_begin=timestamp_begin,
                seek_num_frames=seek_num_frames,
                time_precision=time_precision,
                time_precision_features=time_precision_features,
                input_stride=input_stride,
                prev_idx=prev_i,
                idx=i,
                return_token_timestamps=return_token_timestamps,
                decoder_input_ids=decoder_input_ids,
            )

            seek[prev_i] += segment_offset

            current_segments[prev_i] += segments

        # stop after the first pass when a single call to generate was requested
        if force_unique_generate_call:
            break

    # 7. Once all segments are added to the list of all segments, called `current_segments`, we extract the predicted
    # output tokens from the list of dicts. If we use batch size > 1, we make sure to pad the output
    # (drop the prompt pseudo-segment stored at index 0 when the prompt only conditions the first segment)
    final_segments = (
        [x[1:] for x in current_segments]
        if (prompt_ids is not None and generation_config.prompt_condition_type == "first-segment")
        else current_segments
    )

    # if return_dict_in_generate=True and we forced a unique call to generate or return_timestamps=False, meaning we are sure only one call to generate has been made,
    # -> we can return a ModelOutput
    # otherwise, return_dict_in_generate is applied in the 'result' of each segment in final_segments
    if (
        return_dict_in_generate
        and generation_config.return_dict_in_generate
        and (force_unique_generate_call or not return_timestamps)
    ):
        # only one call to generate_with_fallback, we can return a ModelOutput
        outputs = self._stack_split_outputs(seek_outputs, model_output_type, self.device, kwargs)
        if num_return_sequences > 1:
            # encoder outputs are shared across return sequences; keep one copy per input item
            if hasattr(outputs, "encoder_attentions") and outputs.encoder_attentions is not None:
                outputs.encoder_attentions = tuple(
                    outputs.encoder_attentions[i][::num_return_sequences]
                    for i in range(len(outputs.encoder_attentions))
                )
            if hasattr(outputs, "encoder_hidden_states") and outputs.encoder_hidden_states is not None:
                outputs.encoder_hidden_states = tuple(
                    outputs.encoder_hidden_states[i][::num_return_sequences]
                    for i in range(len(outputs.encoder_hidden_states))
                )
        return outputs

    padded_outputs = _pad_to_max_length(
        current_segments=final_segments,
        pad_token_id=generation_config.pad_token_id,
        device=self.device,
        padding_side="right",
        return_token_timestamps=return_token_timestamps,
        force_unique_generate_call=force_unique_generate_call,
    )

    if return_dict_in_generate and generation_config.return_dict_in_generate:
        logger.warning_once(
            "You have passed `return_dict_in_generate=True` and `return_timestamps=True`, this automatically sets `return_segments=True` to access the results of the underlying calls to GenerationMixin's generate in the returned `segments`."
        )
        return_segments = True
    elif not return_segments and not return_token_timestamps:
        # plain token ids — the most common return type
        return padded_outputs

    if return_token_timestamps:
        sequences, token_timestamps = padded_outputs
        outputs = {
            "sequences": sequences,
            "token_timestamps": token_timestamps,
        }
    else:
        sequences = padded_outputs
        outputs = {
            "sequences": sequences,
        }

    if return_segments:
        outputs["segments"] = final_segments

    return outputs
def generate_with_fallback(
    self,
    segment_input,
    decoder_input_ids,
    cur_bsz,
    seek,
    batch_idx_map,
    temperatures,
    generation_config,
    logits_processor,
    stopping_criteria,
    prefix_allowed_tokens_fn,
    synced_gpus,
    return_token_timestamps,
    do_condition_on_prev_tokens,
    is_shortform,
    batch_size,
    attention_mask,
    kwargs,
):
    """Decode the current audio chunk, retrying low-quality samples with temperature fallback.

    Each temperature in `temperatures` is tried in order; samples whose output fails the
    quality checks in `_need_fallback` are re-generated at the next temperature, while
    samples with an acceptable result keep it. The fallback batch shrinks on every round.

    Returns `(seek_sequences, seek_outputs, should_skip, do_condition_on_prev_tokens,
    model_output_type)`, where the first two are per-sample containers ordered like the
    batch this method was called with.
    """
    # private copy so key deletions below don't leak back to the caller
    kwargs = copy.copy(kwargs)

    # 6.6 Batch generate current chunk
    seek_sequence_list = [None for _ in range(cur_bsz)]
    seek_outputs_list = [None for _ in range(cur_bsz)]
    needs_fallback = [False for _ in range(cur_bsz)]
    should_skip = [False for _ in range(cur_bsz)]
    # position in the (shrinking) fallback batch -> position in the original batch
    fallback_index_map = list(range(cur_bsz))
    if generation_config.no_speech_threshold is not None:
        self._setup_no_speech_detection(logits_processor, segment_input, decoder_input_ids, kwargs)

    for fallback_idx, temperature in enumerate(temperatures):
        # sample only for a strictly positive temperature; `None`/0.0 means greedy or beam search
        generation_config.do_sample = temperature is not None and temperature > 0.0
        generation_config.temperature = temperature if generation_config.do_sample else 1.0
        if generation_config.do_sample:
            generation_config.num_beams = 1

        generate_kwargs = copy.copy(kwargs)
        # these three are driven by `generation_config` above; drop duplicates from kwargs
        for key in ["do_sample", "temperature", "num_beams"]:
            if key in generate_kwargs:
                del generate_kwargs[key]

        cur_bsz = decoder_input_ids.shape[0]
        if generation_config.cache_implementation == "static" and cur_bsz < batch_size:
            # static cache needs a fixed batch size: pad all inputs back up to `batch_size`
            segment_input = F.pad(segment_input, (0, 0, 0, 0, 0, batch_size - cur_bsz), value=0)
            decoder_input_ids = F.pad(
                decoder_input_ids, (0, 0, 0, batch_size - cur_bsz), value=generation_config.pad_token_id
            )
            if generate_kwargs.get("decoder_attention_mask") is not None:
                generate_kwargs["decoder_attention_mask"] = F.pad(
                    generate_kwargs["decoder_attention_mask"], (0, 0, 0, batch_size - cur_bsz), value=True
                )
            if generate_kwargs.get("encoder_outputs") is not None:
                generate_kwargs["encoder_outputs"] = F.pad(
                    generate_kwargs["encoder_outputs"], (0, 0, 0, 0, 0, batch_size - cur_bsz), value=0
                )

        seek_outputs = super().generate(
            segment_input,
            generation_config=generation_config,
            logits_processor=logits_processor,
            stopping_criteria=stopping_criteria,
            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
            synced_gpus=synced_gpus,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            **generate_kwargs,
        )

        model_output_type = type(seek_outputs)

        # post-process sequence tokens and outputs to be in list form
        seek_sequences, seek_outputs = self._postprocess_outputs(
            seek_outputs=seek_outputs,
            decoder_input_ids=decoder_input_ids,
            return_token_timestamps=return_token_timestamps,
            generation_config=generation_config,
            is_shortform=is_shortform,
            seek=seek,
            batch_idx_map=batch_idx_map,
        )

        if cur_bsz < batch_size:
            # drop the rows that only existed as static-cache padding
            seek_sequences = seek_sequences[:cur_bsz]
            seek_outputs = seek_outputs[:cur_bsz]

        # 6.7 Extract cut sequences from every sequence and check if fallback should be applied
        # Loop over each decoded audio individually as each decoding can be of a different length
        new_fallback_index_map = []
        new_segment_input = []
        new_decoder_input_ids = []
        new_decoder_attention_mask = []

        for i, seek_sequence in enumerate(seek_sequences):
            # remove all padding tokens, except for the eos token
            if seek_sequence[-1] == generation_config.pad_token_id:
                num_paddings = (seek_sequence == generation_config.pad_token_id).sum()

                if generation_config.pad_token_id == generation_config.eos_token_id:
                    # we do not remove the eos token id since it is needed for avg logprob calculation in _need_fallback
                    num_paddings -= 1

                if num_paddings != 0:
                    seek_sequence = seek_sequence[:-num_paddings]

            # check which sequences in batch need fallback & which should be skipped
            needs_fallback[i], should_skip[i] = self._need_fallback(
                seek_sequence,
                seek_outputs,
                i,
                logits_processor,
                generation_config,
                self.config.vocab_size,
                temperature,
            )

            # remove eos token
            if seek_sequence[-1] == generation_config.eos_token_id:
                seek_sequence = seek_sequence[:-1]

            seek_sequence_list[fallback_index_map[i]] = seek_sequence
            seek_outputs_list[fallback_index_map[i]] = seek_outputs[i]
            is_low_temperature = temperature is None or temperature < 0.5
            # conditioning the next segment on these tokens is only trusted for near-greedy decoding
            do_condition_on_prev_tokens[fallback_index_map[i]] = (
                generation_config.condition_on_prev_tokens and is_low_temperature
            )

            if needs_fallback[i]:
                new_fallback_index_map.append(fallback_index_map[i])
                new_segment_input.append(segment_input[i])
                new_decoder_input_ids.append(decoder_input_ids[i])
                if "decoder_attention_mask" in kwargs:
                    new_decoder_attention_mask.append(kwargs["decoder_attention_mask"][i])

        fallback_index_map = new_fallback_index_map

        # if no sequence needs to be run with temperature fallback, we're finished
        if len(fallback_index_map) == 0 or fallback_idx == len(temperatures) - 1:
            seek_sequences = seek_sequence_list
            seek_outputs = seek_outputs_list
            break

        # if we're still in the loop, make sure that decoder_input_ids and segment inputs are tensors
        decoder_input_ids = torch.stack(new_decoder_input_ids)
        segment_input = torch.stack(new_segment_input)
        if "decoder_attention_mask" in kwargs:
            kwargs["decoder_attention_mask"] = torch.stack(new_decoder_attention_mask)

    return seek_sequences, seek_outputs, should_skip, do_condition_on_prev_tokens, model_output_type
@staticmethod
def _prepare_segments(prompt_ids, batch_size, generation_config):
if prompt_ids is not None and generation_config.prompt_condition_type == "first-segment":
prev_sot_token_id = getattr(generation_config, "prev_sot_token_id", None)
prompt_ids = prompt_ids[1:] if prompt_ids[0] == prev_sot_token_id else prompt_ids
current_segments = [[{"tokens": prompt_ids}] for _ in range(batch_size)]
else:
current_segments = [[] for _ in range(batch_size)]
return current_segments
def _postprocess_outputs(
    self,
    seek_outputs,
    decoder_input_ids,
    return_token_timestamps,
    generation_config,
    is_shortform,
    seek,
    batch_idx_map,
):
    """Strip the prompt tokens from the generated sequences and split batched generate
    outputs into per-sample pieces.

    Returns `(sequence_tokens, seek_outputs)` where `sequence_tokens` contains only
    newly generated tokens and `seek_outputs` is either the raw tensor (when generate
    returned a plain tensor) or a list of one output dict per sample.
    """
    # remove all previously passed decoder input ids
    # should happen only if it is the first generated segment
    start_idx = decoder_input_ids.shape[-1]

    # plain tensor output: generate ran without return_dict_in_generate
    if isinstance(seek_outputs, torch.Tensor):
        return seek_outputs[:, start_idx:], seek_outputs

    if return_token_timestamps and hasattr(generation_config, "alignment_heads"):
        num_frames = getattr(generation_config, "num_frames")
        if num_frames is not None:
            # each sample already consumed `seek` frames; align only over the remainder,
            # reordered to match the current (possibly reduced) batch
            num_frames = num_frames - seek
            num_frames = num_frames[batch_idx_map]
        seek_outputs["token_timestamps"] = self._extract_token_timestamps(
            seek_outputs,
            generation_config.alignment_heads,
            num_frames=num_frames,
            num_input_ids=decoder_input_ids.shape[-1],
        )

    def split_by_batch_index(values, key, batch_idx, is_shortform, beam_indices=None):
        # Extract the slice of `values` belonging to sample `batch_idx`, moved to CPU.
        if beam_indices is not None and key == "scores":
            # with beam search, follow the beam that produced each step's token
            return [v[beam_idx].cpu() for (v, beam_idx) in zip(values, beam_indices[batch_idx][: len(values)])]
        if key in ["scores", "encoder_attentions", "encoder_hidden_states", "logits"]:
            return [v[batch_idx].cpu() for v in values]
        if key in ["decoder_attentions", "decoder_hidden_states", "cross_attentions"]:
            # nested tuple output: outer over generation steps, inner over layers
            return tuple(tuple(w[batch_idx][None].cpu() for w in v) for v in values)
        elif key == "past_key_values":
            if not is_shortform:
                # we don't save `past_key_values` as this is too costly for longform
                return None
            all_past_key_values = []
            for layer_idx in range(self.config.decoder_layers):
                layer_cache = (
                    values.self_attention_cache.layers[layer_idx].keys[batch_idx][None].cpu(),
                    values.self_attention_cache.layers[layer_idx].values[batch_idx][None].cpu(),
                    values.cross_attention_cache.layers[layer_idx].keys[batch_idx][None].cpu(),
                    values.cross_attention_cache.layers[layer_idx].values[batch_idx][None].cpu(),
                )
                all_past_key_values.append(layer_cache)
            return EncoderDecoderCache(all_past_key_values)

        return values[batch_idx].cpu()

    sequence_tokens = seek_outputs["sequences"][:, start_idx:]

    # turn the dict of batched tensors into a list of per-sample dicts
    seek_outputs = [
        {
            k: split_by_batch_index(v, k, i, is_shortform, beam_indices=seek_outputs.get("beam_indices"))
            for k, v in seek_outputs.items()
        }
        for i in range(sequence_tokens.shape[0])
    ]

    return sequence_tokens, seek_outputs
def _stack_split_outputs(self, seek_outputs, model_output_type, device, kwargs):
    """Re-batch a list of per-sample output dicts (created by `split_by_batch_index`
    in `_postprocess_outputs`) into a single `model_output_type` instance on `device`.
    """
    # Stack back seek_outputs tensors after splitting them with the split_by_batch_index method
    outputs = {}
    for key in seek_outputs[0]:
        if key in ["sequences", "beam_indices", "token_timestamps"]:
            outputs[key] = torch.stack([v[key] for v in seek_outputs], dim=0).to(device)
        elif key in ["scores", "encoder_attentions", "encoder_hidden_states", "logits"]:
            # tuple with one entry per step/layer, each stacked over the batch
            outputs[key] = tuple(
                torch.stack([v[key][i] for v in seek_outputs]).to(device) for i in range(len(seek_outputs[0][key]))
            )
        elif key == "sequences_scores":
            outputs[key] = torch.stack([v[key] for v in seek_outputs], dim=0).to(device)
        elif key in ["decoder_attentions", "decoder_hidden_states", "cross_attentions"]:
            # nested tuple: outer over generation steps, inner over layers
            outputs[key] = tuple(
                tuple(
                    torch.stack([v[key][i][j] for v in seek_outputs]).squeeze(1).to(device)
                    for j in range(len(seek_outputs[0][key][0]))
                )
                for i in range(len(seek_outputs[0][key]))
            )
        elif key == "past_key_values":
            if seek_outputs[0][key] is not None:
                # rebuild one EncoderDecoderCache whose per-layer tensors are stacked over the batch
                all_past_key_values = []
                for layer_idx in range(len(seek_outputs[0][key])):
                    self_attention_k, self_attention_v, cross_attention_k, cross_attention_v = (
                        torch.stack(
                            [
                                getattr(getattr(sub_output[key], sub_cache).layers[layer_idx], sub_key)
                                for sub_output in seek_outputs
                            ]
                        )
                        .squeeze(1)
                        .to(device)
                        for sub_cache in ["self_attention_cache", "cross_attention_cache"]
                        for sub_key in ["keys", "values"]
                    )
                    all_past_key_values.append(
                        (self_attention_k, self_attention_v, cross_attention_k, cross_attention_v)
                    )
                outputs[key] = EncoderDecoderCache(tuple(all_past_key_values))
            else:
                outputs[key] = None

    token_timestamps = outputs.get("token_timestamps")
    if token_timestamps is not None:
        # ModelOutput dataclasses have no `token_timestamps` field, so fall back to a plain dict
        model_output_type = dict

    return model_output_type(**outputs)
def _need_fallback(
self,
seek_sequence,
seek_outputs,
index,
logits_processor,
generation_config,
vocab_size,
temperature,
):
needs_fallback = False
should_skip = False
if generation_config.compression_ratio_threshold is not None:
compression_ratio = self._retrieve_compression_ratio(seek_sequence, vocab_size)
if compression_ratio > generation_config.compression_ratio_threshold:
needs_fallback = True
if generation_config.logprob_threshold is not None:
if hasattr(seek_outputs[0], "sequences_scores"):
logprobs = [s["sequences_scores"] for s in seek_outputs][index]
else:
scores = seek_outputs[index]["scores"]
logprobs = self._retrieve_avg_logprobs(
scores,
seek_sequence,
temperature,
)
if logprobs < generation_config.logprob_threshold:
needs_fallback = True
if generation_config.no_speech_threshold is not None:
no_speech_prob = _get_attr_from_logit_processors(
logits_processor, WhisperNoSpeechDetection, "no_speech_prob"
)
if (
logprobs < generation_config.logprob_threshold
and no_speech_prob[index] > generation_config.no_speech_threshold
):
needs_fallback = False
should_skip = True
return needs_fallback, should_skip
def _expand_variables_for_generation(
self, input_features, seek, max_frames, init_tokens, batch_size, condition_on_prev_tokens, generation_config
):
if generation_config.num_return_sequences is not None and generation_config.num_return_sequences > 1:
batch_idx_map = list(range(batch_size * generation_config.num_return_sequences))
cur_bsz = len(batch_idx_map)
do_condition_on_prev_tokens = [condition_on_prev_tokens for _ in range(len(batch_idx_map))]
input_features = input_features.repeat_interleave(generation_config.num_return_sequences, dim=0)
seek = seek.repeat_interleave(generation_config.num_return_sequences, dim=0)
max_frames = max_frames.repeat_interleave(generation_config.num_return_sequences, dim=0)
init_tokens = init_tokens.repeat_interleave(generation_config.num_return_sequences, dim=0)
generation_config.num_return_sequences = 1
else:
cur_bsz = batch_size
batch_idx_map = list(range(cur_bsz))
do_condition_on_prev_tokens = [condition_on_prev_tokens for _ in range(cur_bsz)]
return (
batch_idx_map,
cur_bsz,
input_features,
seek,
max_frames,
init_tokens,
do_condition_on_prev_tokens,
)
@staticmethod
def _setup_no_speech_detection(logits_processor, segment_input, decoder_input_ids, kwargs):
    """Hand the current chunk's inputs to the `WhisperNoSpeechDetection` processor so it
    can measure the no-speech probability during decoding."""
    set_inputs = _get_attr_from_logit_processors(logits_processor, WhisperNoSpeechDetection, "set_inputs")
    # only tensor-valued kwargs are relevant to the detector's forward pass
    tensor_kwargs = {name: value for name, value in kwargs.items() if torch.is_tensor(value)}
    set_inputs({"inputs": segment_input, "input_ids": decoder_input_ids, **tensor_kwargs})
@staticmethod
def _retrieve_total_input_frames(input_features, input_stride, kwargs):
if input_features is not None:
return input_features.shape[0], input_features.shape[-1]
if "encoder_outputs" in kwargs:
encoder_outputs_shape = (
kwargs["encoder_outputs"][0].shape
if isinstance(kwargs["encoder_outputs"], BaseModelOutput)
else kwargs["encoder_outputs"].shape
)
return encoder_outputs_shape[0], encoder_outputs_shape[1] * input_stride
raise ValueError("Make sure to provide either `input_features` or `encoder_outputs` to `generate`.")
@staticmethod
def _maybe_warn_unused_inputs(
condition_on_prev_tokens,
temperature,
compression_ratio_threshold,
logprob_threshold,
no_speech_threshold,
total_input_frames,
):
warning_prefix = (
f"Audio input consists of only {total_input_frames}. "
"Short-form transcription is activated."
"{}, but will be ignored."
)
if condition_on_prev_tokens is not None:
logger.warning(warning_prefix.format(f"condition_on_prev_tokens is set to {condition_on_prev_tokens}"))
if compression_ratio_threshold is not None:
logger.warning(
warning_prefix.format(f"compression_ratio_threshold is set to {compression_ratio_threshold}")
)
if logprob_threshold is not None:
logger.warning(warning_prefix.format(f"logprob_threshold is set to {logprob_threshold}"))
if no_speech_threshold is not None:
logger.warning(warning_prefix.format(f"no_speech_threshold is set to {no_speech_threshold}"))
@staticmethod
def _set_return_outputs(return_dict_in_generate, return_token_timestamps, logprob_threshold, generation_config):
if return_dict_in_generate is None:
return_dict_in_generate = generation_config.return_dict_in_generate
else:
generation_config.return_dict_in_generate = return_dict_in_generate
generation_config.return_token_timestamps = return_token_timestamps
if return_token_timestamps:
generation_config.return_dict_in_generate = True
generation_config.output_attentions = True
generation_config.output_scores = True
if logprob_threshold is not None:
generation_config.return_dict_in_generate = True
generation_config.output_scores = True
return return_dict_in_generate
def _set_return_timestamps(self, return_timestamps, is_shortform, generation_config):
if return_timestamps is None and hasattr(generation_config, "return_timestamps"):
return_timestamps = generation_config.return_timestamps
if not is_shortform:
if return_timestamps is False:
raise ValueError(
"You have passed more than 3000 mel input features (> 30 seconds) which automatically "
"enables long-form generation which requires the model to predict timestamp tokens. Please "
"either pass `return_timestamps=True` or make sure to pass no more than 3000 mel input features."
)
logger.info("Setting `return_timestamps=True` for long-form generation.")
return_timestamps = True
if return_timestamps and not hasattr(generation_config, "no_timestamps_token_id"):
raise ValueError(
"You are trying to return timestamps, but the generation config is not properly set. "
"Make sure to initialize the generation config with the correct attributes that are needed such as "
"`no_timestamps_token_id`. For more details on how to generate the approtiate config, refer to "
"https://github.com/huggingface/transformers/issues/21878#issuecomment-1451902363"
)
generation_config.return_timestamps = return_timestamps
if hasattr(generation_config, "no_timestamps_token_id"):
timestamp_begin = generation_config.no_timestamps_token_id + 1
else:
# BC for models missing the `no_timestamps_token_id` in the generation config when generating short-form
# with no timestamps. We set the timestamp begin token larger than the vocab size, such that the
# timestamp condition is never met in the decoding loop
timestamp_begin = self.config.vocab_size + 1
return timestamp_begin
@staticmethod
def _set_language_and_task(language, task, is_multilingual, generation_config):
if is_multilingual is not None:
if not hasattr(generation_config, "is_multilingual"):
raise ValueError(
"The generation config is outdated and is thus not compatible with the `is_multilingual` argument "
"to `generate`. Please update the generation config as per the instructions "
"https://github.com/huggingface/transformers/issues/25084#issuecomment-1664398224"
)
generation_config.is_multilingual = is_multilingual
if hasattr(generation_config, "is_multilingual") and not generation_config.is_multilingual:
if task is not None or language is not None:
raise ValueError(
"Cannot specify `task` or `language` for an English-only model. If the model is intended to be "
"multilingual, pass `is_multilingual=True` to generate, or update the generation config."
)
if language is not None:
if not hasattr(generation_config, "lang_to_id"):
raise ValueError(
"The generation config is outdated and is thus not compatible with the `language` argument "
"to `generate`. Please update the generation config as per the instructions "
"https://github.com/huggingface/transformers/issues/25084#issuecomment-1664398224"
)
generation_config.language = language
if task is not None:
if not hasattr(generation_config, "task_to_id"):
raise ValueError(
"The generation config is outdated and is thus not compatible with the `task` argument "
"to `generate`. Please update the generation config as per the instructions "
"https://github.com/huggingface/transformers/issues/25084#issuecomment-1664398224"
)
generation_config.task = task
def _retrieve_init_tokens(self, input_features, batch_size, generation_config, config, num_segment_frames, kwargs):
    """Assemble the per-sample decoder prompt tokens: start token, language token, task
    token and (optionally) `<|notimestamps|>`, honoring the deprecated
    `forced_decoder_ids` and running language detection when no language is given.

    Returns a `torch.LongTensor` of shape `(batch_size, num_init_tokens)`.
    """
    def replace_or_add(lst: list[int], num: int, itr: Iterator[int]):
        """short function to replace num with a itr in lst"""
        found = any(i in lst for i in itr)
        if found:
            # NOTE(review): this branch builds a *new* list and relies on the caller using
            # the return value; the call site below discards it, so the "replace" case is
            # a no-op there — confirm whether that is intended
            lst = [num if i in itr else i for i in lst]
        else:
            lst.append(num)
        return lst

    def language_to_id(language: str) -> int:
        # accept a language name ("english"), a code ("en"), or a ready-made token ("<|en|>")
        language = language.lower()
        if language in generation_config.lang_to_id:
            language_token = language
        elif language in TO_LANGUAGE_CODE:
            language_token = f"<|{TO_LANGUAGE_CODE[language]}|>"
        elif language in TO_LANGUAGE_CODE.values():
            language_token = f"<|{language}|>"
        else:
            is_language_code = len(language) == 2
            raise ValueError(
                f"Unsupported language: {language}. Language should be one of:"
                f" {list(TO_LANGUAGE_CODE.values()) if is_language_code else list(TO_LANGUAGE_CODE.keys())}."
            )
        if language_token not in generation_config.lang_to_id:
            raise ValueError(
                f"{language_token} is not supported by this specific model as it is not in the "
                "`generation_config.lang_to_id`. (You should just add it to the generation config)"
            )

        return generation_config.lang_to_id[language_token]

    task = getattr(generation_config, "task", None)
    language = getattr(generation_config, "language", None)

    # every prompt starts with the decoder start token
    init_tokens = [generation_config.decoder_start_token_id]

    # TL;DR we silently ignore `forced_decoder_ids` (old flag) when `task` or `language` (new flags) are set.
    # `forced_decoder_ids` is an old generation config attribute that is now deprecated in favor of `task` and
    # `language` (see https://github.com/huggingface/transformers/pull/28687). Nevertheless, keep in mind that
    # the original checkpoints all contain this attribute, and thus we should maintain backwards compatibility.
    if task is None and language is None:
        forced_decoder_ids = getattr(generation_config, "forced_decoder_ids", None)

        # fallback: check the model config for forced_decoder_ids
        if forced_decoder_ids is None and getattr(config, "forced_decoder_ids", None) is not None:
            forced_decoder_ids = config.forced_decoder_ids

        if forced_decoder_ids is not None:
            logger.warning_once(
                "Using custom `forced_decoder_ids` from the (generation) config. This is deprecated in favor of "
                "the `task` and `language` flags/config options."
            )

        if forced_decoder_ids is not None and forced_decoder_ids[0][1] is None:
            logger.warning_once(
                "Transcription using a multilingual Whisper will default to language detection followed by "
                "transcription instead of translation to English. This might be a breaking change for your "
                "use case. If you want to instead always translate your audio to English, make sure to pass "
                "`language='en'`. See https://github.com/huggingface/transformers/pull/28687 for more details."
            )

        if forced_decoder_ids is not None and forced_decoder_ids[0][0] == 1:
            # consume the consecutively-indexed forced tokens (indices 1, 2, ...) into init_tokens
            i = 1
            while len(forced_decoder_ids) > 0 and forced_decoder_ids[0][0] == i:
                init_tokens += [forced_decoder_ids[0][1]]
                forced_decoder_ids = forced_decoder_ids[1:]
                i += 1

            if len(forced_decoder_ids) > 0:
                raise ValueError(
                    f"You are using token ids in `forced_decoder_ids` that do not seem to correctly follow "
                    f"the prompt pattern of Whisper. Make sure that {forced_decoder_ids} has an entry for all "
                    f"indices >= 1 and < {forced_decoder_ids[0][0]}.",
                )

    # position 1 is by convention the language token; `None` there means "detect"
    is_lang_id_undefined = len(init_tokens) <= 1 or (len(init_tokens) > 1 and init_tokens[1] is None)

    # Make sure language is a list of strings of the correct length
    if isinstance(language, (list, tuple)):
        if any(l is None for l in language):
            raise TypeError(
                "Expected `language` to be `None`, a single string (e.g. `'en'`), or a list of strings with "
                "length equal to the batch size (e.g. `('en', 'fr')` for a batch size of 2). Got a list "
                "containing `None`."
            )
        if len(language) != batch_size:
            raise ValueError(
                "When passing a list of languages, the length of the list must match the batch size. "
                f"Expected length of {batch_size}, but got {len(language)} languages."
            )
        languages = language
    elif language is None:
        # Language will be detected for each item in batch
        languages = [None] * batch_size
    else:
        languages = [language]  # Use a length-1 list now, broadcast later

    # Separate init_tokens for each language
    init_tokens = [copy.copy(init_tokens) for _ in languages]

    # Update init_tokens with languages
    lang_ids = None
    if language is not None:
        lang_ids = [language_to_id(l) for l in languages]
    elif hasattr(generation_config, "lang_to_id") and is_lang_id_undefined:
        # language is not defined or intentionally set to `None` to trigger language detection
        lang_ids = self.detect_language(
            input_features=input_features,
            encoder_outputs=kwargs.get("encoder_outputs", None),
            generation_config=generation_config,
            num_segment_frames=num_segment_frames,
        ).tolist()

    if lang_ids is not None:
        # append or replace lang_ids to init_tokens
        for i in range(len(init_tokens)):
            if len(init_tokens[i]) > 1:
                init_tokens[i][1] = lang_ids[i]
            else:
                init_tokens[i].append(lang_ids[i])
    del languages

    # Update init_tokens with task
    for i in range(len(init_tokens)):
        if task is not None:
            if task in TASK_IDS:
                # NOTE(review): the task id is appended here AND passed to `replace_or_add`
                # below; since the replace branch's result is discarded (see helper above),
                # a different pre-existing task id would not actually be replaced — verify
                init_tokens[i].append(generation_config.task_to_id[generation_config.task])
                task_id = generation_config.task_to_id[generation_config.task]

                # if task is defined it'll overwrite task ids that might have already been defined via the generation_config
                replace_or_add(init_tokens[i], task_id, generation_config.task_to_id.values())
            else:
                raise ValueError(f"The `{task}` task is not supported. The task should be one of `{TASK_IDS}`")
        elif language is not None and hasattr(generation_config, "task_to_id"):
            # if language is defined, but no task id is in `init_tokens`, default to transcribe
            if not any(ti in init_tokens[i] for ti in generation_config.task_to_id.values()):
                init_tokens[i].append(generation_config.task_to_id["transcribe"])

        if (
            not generation_config.return_timestamps
            and hasattr(generation_config, "no_timestamps_token_id")
            and init_tokens[i][-1] != generation_config.no_timestamps_token_id
        ):
            init_tokens[i].append(generation_config.no_timestamps_token_id)
        elif (
            generation_config.return_timestamps and init_tokens[i][-1] == generation_config.no_timestamps_token_id
        ):
            logger.info(
                "<|notimestamps|> prompt token is removed from generation_config since `return_timestamps` is set to `'True'`."
            )
            init_tokens[i] = init_tokens[i][:-1]

        # let's make sure we don't pass `None` tokens as prompt tokens
        init_tokens[i] = [t for t in init_tokens[i] if t is not None]

    return torch.as_tensor(init_tokens, dtype=torch.long, device=self.device).expand(batch_size, -1)
def detect_language(
    self,
    input_features: Optional[torch.FloatTensor] = None,
    encoder_outputs: Optional[Union[torch.FloatTensor, BaseModelOutput]] = None,
    generation_config: Optional[GenerationConfig] = None,
    num_segment_frames: int = 3000,
) -> torch.Tensor:
    """
    Detects language from log-mel input features or encoder_outputs

    Parameters:
        input_features (`torch.Tensor` of shape `(batch_size, feature_size, sequence_length)`, *optional*):
            Float values of log-mel features extracted from the raw speech waveform. The raw speech waveform can be obtained by
            loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via
            the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
            [`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a
            tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`] for details.
        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
        generation_config (`~generation.GenerationConfig`, *optional*):
            The generation configuration to be used as base parametrization for the generation call. `**kwargs`
            passed to generate matching the attributes of `generation_config` will override them. If
            `generation_config` is not provided, the default will be used, which had the following loading
            priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
            configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
            default values, whose documentation should be checked to parameterize generation.
        num_segment_frames (`int`, *optional*, defaults to 3000):
            The number of log-mel frames the model expects

    Return:
        A `torch.LongTensor` representing the detected language ids.
    """
    # exactly one of `input_features` / `encoder_outputs` must be provided
    if input_features is None and encoder_outputs is None:
        raise ValueError("You have to specify either `input_features` or `encoder_outputs`")
    elif input_features is not None and encoder_outputs is not None:
        raise ValueError("Make sure to specify only one of `input_features` or `encoder_outputs` - not both!")
    elif input_features is not None:
        # restrict to one segment's worth of frames for the detection forward pass
        inputs = {"input_features": input_features[:, :, :num_segment_frames]}
        batch_size = input_features.shape[0]
    elif encoder_outputs is not None:
        inputs = {"encoder_outputs": encoder_outputs}
        # NOTE(review): the non-BaseModelOutput branch yields `encoder_outputs[0]` (a tensor,
        # not its first dim) as `batch_size` — looks suspicious, presumably should be
        # `encoder_outputs[0].shape[0]`; confirm against callers
        batch_size = (
            encoder_outputs[0].shape[0] if isinstance(encoder_outputs, BaseModelOutput) else encoder_outputs[0]
        )

    generation_config = generation_config or self.generation_config
    # single decoder step from the start token; its logits rank the language tokens
    decoder_input_ids = (
        torch.ones((batch_size, 1), device=self.device, dtype=torch.long)
        * generation_config.decoder_start_token_id
    )

    with torch.no_grad():
        logits = self(**inputs, decoder_input_ids=decoder_input_ids, use_cache=False).logits[:, -1]

    # mask out every token that is not a language token, then take the argmax
    non_lang_mask = torch.ones_like(logits[0], dtype=torch.bool)
    non_lang_mask[list(generation_config.lang_to_id.values())] = False

    logits[:, non_lang_mask] = -np.inf

    lang_ids = logits.argmax(-1)

    return lang_ids
@staticmethod
def _check_decoder_input_ids(kwargs):
decoder_input_ids = kwargs.get("decoder_input_ids", None)
assistant_model = kwargs.get("assistant_model", None)
if decoder_input_ids is not None and assistant_model is not None:
raise ValueError(
"Passing `decoder_input_ids` is deprecated. Consider passing `prompt_ids` instead.",
)
@staticmethod
def _set_num_frames(return_token_timestamps, generation_config, attention_mask, kwargs):
if return_token_timestamps:
if getattr(generation_config, "task", None) == "translate":
logger.warning("Token-level timestamps may not be reliable for task 'translate'.")
if not hasattr(generation_config, "alignment_heads"):
raise ValueError(
"Model generation config has no `alignment_heads`, token-level timestamps not available. "
"See https://gist.github.com/hollance/42e32852f24243b748ae6bc1f985b13a on how to add this property to the generation config."
)
if "num_frames" in kwargs:
generation_config.num_frames = kwargs.pop("num_frames")
if isinstance(generation_config.num_frames, torch.Tensor):
generation_config.num_frames = generation_config.num_frames.cpu()
else:
generation_config.num_frames = torch.tensor(generation_config.num_frames)
logger.warning_once(
"`num_frames` is deprecated and will be removed in Transformers v5. Use `attention_mask` instead, as it can be used to infer the number of frames. "
"You can retrieve the `attention_mask` by doing `processor(audio, ..., return_attention_mask=True"
)
elif attention_mask is not None:
generation_config.num_frames = attention_mask.sum(-1).cpu()
else:
logger.warning_once(
"When setting `return_token_timestamps` to `True`, make sure to pass an `attention_mask` to get precise token-level timestamps. You can retrieve the `attention_mask` by doing `processor(audio, ..., return_attention_mask=True)` "
)
generation_config.num_frames = None
@staticmethod
def _set_thresholds_and_condition(
generation_config,
logprob_threshold,
compression_ratio_threshold,
no_speech_threshold,
condition_on_prev_tokens,
):
generation_config.logprob_threshold = (
logprob_threshold
if logprob_threshold is not None
else getattr(generation_config, "logprob_threshold", None)
)
generation_config.compression_ratio_threshold = (
compression_ratio_threshold
if compression_ratio_threshold is not None
else getattr(generation_config, "compression_ratio_threshold", None)
)
generation_config.no_speech_threshold = (
no_speech_threshold
if no_speech_threshold is not None
else getattr(generation_config, "no_speech_threshold", None)
)
generation_config.condition_on_prev_tokens = (
condition_on_prev_tokens
if condition_on_prev_tokens is not None
else getattr(generation_config, "condition_on_prev_tokens", None)
)
@staticmethod
def _set_prompt_condition_type(generation_config, prompt_condition_type):
allowed_cond_types = ["first-segment", "all-segments"]
# default to "first-segment"
prompt_condition_type = prompt_condition_type or allowed_cond_types[0]
if prompt_condition_type not in allowed_cond_types:
raise ValueError(
f"`prompt_condition_type={prompt_condition_type} does not exist. Make sure to set `prompt_condition_type` to one of {', '.join(allowed_cond_types)}"
)
if generation_config.condition_on_prev_tokens is not True and prompt_condition_type == "all-segments":
raise ValueError(
"Make sure to set `condition_on_prev_tokens=True` when setting `prompt_condition_type='all-segments'`."
)
generation_config.prompt_condition_type = prompt_condition_type
@staticmethod
def _set_condition_on_prev_tokens(condition_on_prev_tokens, generation_config):
condition_on_prev_tokens = (
condition_on_prev_tokens
if condition_on_prev_tokens is not None
else getattr(generation_config, "condition_on_prev_tokens", False)
)
generation_config.condition_on_prev_tokens = condition_on_prev_tokens
@staticmethod
def _retrieve_max_frames_and_seek(batch_size, attention_mask, total_input_frames, is_shortform):
if batch_size > 1 and not is_shortform and attention_mask is None:
raise ValueError(
"When doing batched long-form audio transcription, make sure to pass an `attention_mask`. You can retrieve the `attention_mask` by doing `processor(audio, ..., return_attention_mask=True)` "
)
elif batch_size > 1 and not is_shortform:
max_frames = attention_mask.sum(-1).cpu().to(torch.long)
seek = torch.zeros((batch_size,), dtype=torch.long)
else:
max_frames = torch.ones((batch_size,), dtype=torch.long) * total_input_frames
seek = torch.zeros((batch_size,), dtype=torch.long)
return max_frames, seek
def _retrieve_logit_processors(self, generation_config, logits_processor, begin_index, num_beams, device):
if generation_config.return_timestamps is True:
timestamp_processor = WhisperTimeStampLogitsProcessor(generation_config, begin_index=begin_index)
logits_processor = (
[timestamp_processor] if logits_processor is None else [timestamp_processor] + logits_processor
)
if generation_config.suppress_tokens is not None:
suppress_tokens_processor = SuppressTokensLogitsProcessor(generation_config.suppress_tokens, device=device)
logits_processor = (
[suppress_tokens_processor]
if logits_processor is None
else [suppress_tokens_processor] + logits_processor
)
generation_config.suppress_tokens = None
if generation_config.begin_suppress_tokens is not None:
begin_suppress_processor = SuppressTokensAtBeginLogitsProcessor(
generation_config.begin_suppress_tokens, begin_index=begin_index, device=device
)
logits_processor = (
[begin_suppress_processor]
if logits_processor is None
else [begin_suppress_processor] + logits_processor
)
generation_config.begin_suppress_tokens = None
if generation_config.no_speech_threshold is not None:
no_speech_detector = WhisperNoSpeechDetection(
no_speech_token=generation_config.no_timestamps_token_id - 1,
begin_index=begin_index,
scores_is_logprobs=num_beams > 1,
)
logits_processor = (
[no_speech_detector] if logits_processor is None else [no_speech_detector] + logits_processor
)
no_speech_detector.set_model(self)
return logits_processor
@staticmethod
def _maybe_reduce_batch(input_features, seek, max_frames, cur_bsz, batch_idx_map):
prev_bsz = cur_bsz
new_batch_idx_map = []
for i in range(prev_bsz):
prev_i = batch_idx_map[i]
if seek[prev_i] >= max_frames[prev_i]:
cut_index = i + (cur_bsz - prev_bsz)
cur_bsz -= 1
input_features = torch.cat([input_features[:cut_index], input_features[cut_index + 1 :]], dim=0)
else:
# cut out index that goes away
new_batch_idx_map.append(prev_i)
return input_features, cur_bsz, new_batch_idx_map
@staticmethod
def _get_input_segment(input_features, seek, seek_num_frames, num_segment_frames, cur_bsz, batch_idx_map):
if input_features is None:
return None
segment_input = []
for i in range(cur_bsz):
prev_i = batch_idx_map[i]
segment_input_slice = input_features[i : i + 1, :, seek[prev_i] : seek[prev_i] + seek_num_frames[prev_i]]
if segment_input_slice.shape[-1] < num_segment_frames:
# pad to 3000 if necessary
segment_input_slice = F.pad(
segment_input_slice, pad=(0, num_segment_frames - segment_input_slice.shape[-1])
)
segment_input.append(segment_input_slice)
segment_input = torch.cat(segment_input, dim=0)
return segment_input
@staticmethod
def _prepare_decoder_input_ids(
cur_bsz,
init_tokens,
current_segments,
batch_idx_map,
do_condition_on_prev_tokens,
prompt_ids,
generation_config,
config,
device,
suppress_tokens,
timestamp_begin,
kwargs,
):
if "decoder_input_ids" in kwargs:
decoder_input_ids = kwargs.pop("decoder_input_ids")
return decoder_input_ids, kwargs
cut_off_length = config.max_target_positions // 2 - 1
decoder_input_ids = init_tokens[batch_idx_map]
prev_start_of_text = getattr(generation_config, "prev_sot_token_id", None)
if prev_start_of_text is None:
if suppress_tokens is not None and len(suppress_tokens) >= 2:
prev_start_of_text = suppress_tokens[-2]
else:
prev_start_of_text = None
if any(do_condition_on_prev_tokens) and len(current_segments[0]) > 0:
# according to https://github.com/openai/whisper/blob/e58f28804528831904c3b6f2c0e473f346223433/whisper/decoding.py#L609
active_segments = [current_segments[i] if do_condition_on_prev_tokens[i] else None for i in batch_idx_map]
if prompt_ids is not None and generation_config.prompt_condition_type == "all-segments":
prev_ids = prompt_ids
else:
one_tensor = torch.ones((cur_bsz, 1), device=device, dtype=torch.long)
prev_ids = prev_start_of_text * one_tensor[0] if prev_start_of_text is not None else None
padding = "max_length" if generation_config.cache_implementation == "static" else "longest"
prev_tokens = _pad_to_max_length(
active_segments,
generation_config.pad_token_id,
device=device,
padding_side="left",
padding=padding,
bos_token_tensor=prev_ids,
cut_off_length=cut_off_length,
skip_ending_double_timestamps=True,
timestamp_begin=timestamp_begin,
)
decoder_input_ids = torch.cat([prev_tokens, decoder_input_ids], dim=-1)
kwargs["decoder_attention_mask"] = decoder_input_ids != generation_config.pad_token_id
elif prompt_ids is not None:
prev_tokens = prompt_ids[None].repeat(decoder_input_ids.shape[0], 1)
decoder_input_ids = torch.cat([prev_tokens, decoder_input_ids], dim=-1)
# make sure `"decoder_attention_mask"` is not passed to forward
kwargs.pop("decoder_attention_mask", None)
else:
# make sure `"decoder_attention_mask"` is not passed to forward
kwargs.pop("decoder_attention_mask", None)
return decoder_input_ids, kwargs
def _set_max_new_tokens_and_length(self, config, decoder_input_ids, generation_config):
max_new_tokens = generation_config.max_new_tokens if generation_config.max_new_tokens is not None else 0
if max_new_tokens + decoder_input_ids.shape[-1] > self.config.max_target_positions:
raise ValueError(
f"The length of `decoder_input_ids`, including special start tokens, prompt tokens, and previous tokens, is {decoder_input_ids.shape[-1]}, "
f" and `max_new_tokens` is {max_new_tokens}. Thus, the combined length of "
f"`decoder_input_ids` and `max_new_tokens` is: {max_new_tokens + decoder_input_ids.shape[-1]}. This exceeds the "
f"`max_target_positions` of the Whisper model: {self.config.max_target_positions}. "
"You should either reduce the length of your prompt, or reduce the value of `max_new_tokens`, "
f"so that their combined length is less than {self.config.max_target_positions}."
)
num_initial_tokens = min(config.max_target_positions // 2 - 1, decoder_input_ids.shape[-1] - 1)
# Make sure we don't get larger than `max_length`
if generation_config.max_length is not None and generation_config.max_new_tokens is None:
max_length = min(generation_config.max_length + num_initial_tokens, config.max_target_positions)
logger.info(
f"Increase max_length from {generation_config.max_length} to {max_length} since input is conditioned on previous segment."
)
elif (
generation_config.max_new_tokens is not None
and generation_config.max_new_tokens + decoder_input_ids.shape[-1] > config.max_target_positions
):
max_new_tokens = config.max_target_positions - decoder_input_ids.shape[-1]
generation_config.max_new_tokens = max_new_tokens
@staticmethod
def _retrieve_compression_ratio(tokens, vocab_size):
"""Compute byte length of zlib compressed token bytes vs. byte length of raw token bytes"""
length = int(math.log2(vocab_size) / 8) + 1
token_bytes = b"".join([t.to_bytes(length, "little") for t in tokens.tolist()])
compression_ratio = len(token_bytes) / len(zlib.compress(token_bytes))
return compression_ratio
@staticmethod
def _retrieve_avg_logprobs(scores, tokens, temperature):
rescale_temperature = temperature if temperature > 0.0 else 1
scores = torch.stack(scores).to(tokens.device)
if scores.shape[0] > tokens.shape[0]:
scores = scores[: tokens.shape[0]]
else:
tokens = tokens[-scores.shape[0] :]
logprobs = F.log_softmax((scores * rescale_temperature).float(), dim=-1).to(scores.dtype)
# retrieve logprob of selected tokens and sum
# don't remove the eos token logprob! it counts in avg_logprob calculation in the original implementation
sum_logprobs = sum(logprobs[i][tokens[i]] for i in range(logprobs.shape[0]))
avg_logprobs = sum_logprobs / len(tokens)
return avg_logprobs
@staticmethod
def _retrieve_segment(
seek_sequence,
seek_outputs,
time_offset,
timestamp_begin,
seek_num_frames,
time_precision,
time_precision_features,
input_stride,
prev_idx,
idx,
return_token_timestamps,
decoder_input_ids,
):
# find the predicted "end of segment" predictions of Whisper
# "end of segment" predictions occur whenever Whisper predicts a timestamp token
timestamp_tokens: torch.Tensor = seek_sequence.ge(timestamp_begin)
single_timestamp_ending = timestamp_tokens[-2:].tolist() == [False, True]
timestamp_segment_indices = torch.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0]
timestamp_segment_indices.add_(1)
token_timestamps = seek_outputs[idx]["token_timestamps"] if return_token_timestamps else []
idx_offset = decoder_input_ids.shape[-1]
device = seek_sequence.device
# If whisper predicted a "end of segment" via a timestep token, let's go ever each
# "end of segment" prediction and slice the decoding into segments accordingly
if len(timestamp_segment_indices) > 0:
# if the output contains two consecutive timestamp tokens
slices = timestamp_segment_indices.tolist()
segments = []
if single_timestamp_ending:
slices.append(len(seek_sequence))
else:
# we want to include the last timestamp token in the last segment to know it was no single ending
slices[-1] += 1
last_slice = 0
# Add each segment to list of all segments
for i, current_slice in enumerate(slices):
is_last_slice = i == len(slices) - 1
sliced_tokens = seek_sequence[last_slice:current_slice]
start_timestamp_pos = sliced_tokens[0] - timestamp_begin
idx_sliced_tokens = -1 if not is_last_slice or single_timestamp_ending else -2
end_timestamp_pos = sliced_tokens[idx_sliced_tokens] - timestamp_begin
segments.append(
{
"start": time_offset[prev_idx]
+ start_timestamp_pos.to(torch.float32 if device.type == "mps" else torch.float64)
* time_precision,
"end": time_offset[prev_idx]
+ end_timestamp_pos.to(torch.float32 if device.type == "mps" else torch.float64)
* time_precision,
"tokens": sliced_tokens,
"idxs": (idx_offset + last_slice, idx_offset + current_slice),
"result": seek_outputs[idx],
}
)
if return_token_timestamps:
segments[-1]["token_timestamps"] = (
token_timestamps[idx_offset + last_slice : idx_offset + current_slice] + time_offset[prev_idx]
)
last_slice = current_slice
if single_timestamp_ending:
# single timestamp at the end means no speech after the last timestamp.
segment_offset = seek_num_frames[prev_idx]
else:
# otherwise, ignore the unfinished segment and seek to the last timestamp
# here we throw away all predictions after the last predicted "end of segment"
# since we are cutting right in the middle of an audio
last_timestamp_pos = seek_sequence[last_slice - 2].item() - timestamp_begin
segment_offset = last_timestamp_pos * input_stride
else:
# If whisper does not predict any "end of segment" token, then
# the whole decoding is considered a segment and we add it to the list of segments
timestamps = seek_sequence[timestamp_tokens.nonzero().flatten()]
last_timestamp_pos = int(seek_num_frames[prev_idx] * time_precision_features / time_precision)
if timestamps.numel() > 0 and timestamps[-1] != timestamp_begin:
# no consecutive timestamps but it has a timestamp; use the last one.
last_timestamp_pos = (timestamps[-1] - timestamp_begin).to(
torch.float32 if device.type == "mps" else torch.float64
)
segments = [
{
"start": time_offset[prev_idx],
"end": time_offset[prev_idx] + last_timestamp_pos * time_precision,
"tokens": seek_sequence,
"idxs": (idx_offset, idx_offset + len(seek_sequence)),
"result": seek_outputs[idx],
}
]
if return_token_timestamps:
segments[-1]["token_timestamps"] = (
token_timestamps[idx_offset : idx_offset + len(seek_sequence)] + time_offset[prev_idx]
)
segment_offset = seek_num_frames[prev_idx]
return segments, segment_offset
| WhisperGenerationMixin |
python | getsentry__sentry | tests/sentry/rules/filters/test_issue_category.py | {
"start": 1715,
"end": 2324
} | class ____(
RuleTestCase,
SnubaTestCase,
PerformanceIssueTestCase,
):
rule_cls = IssueCategoryFilter
def test_transaction_category(self) -> None:
tx_event = self.create_performance_issue()
assert tx_event.group
self.assertPasses(self.get_rule(data={"value": GroupCategory.PERFORMANCE.value}), tx_event)
def test_transaction_category_v2(self) -> None:
tx_event = self.create_performance_issue()
assert tx_event.group
self.assertPasses(self.get_rule(data={"value": GroupCategory.DB_QUERY.value}), tx_event)
| IssueCategoryFilterPerformanceTest |
python | kamyu104__LeetCode-Solutions | Python/rle-iterator.py | {
"start": 29,
"end": 611
} | class ____(object):
def __init__(self, A):
"""
:type A: List[int]
"""
self.__A = A
self.__i = 0
self.__cnt = 0
def next(self, n):
"""
:type n: int
:rtype: int
"""
while self.__i < len(self.__A):
if n > self.__A[self.__i] - self.__cnt:
n -= self.__A[self.__i] - self.__cnt
self.__cnt = 0
self.__i += 2
else:
self.__cnt += n
return self.__A[self.__i+1]
return -1
| RLEIterator |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 187467,
"end": 188572
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"repository_id",
"name",
"description",
"template",
"homepage_url",
"has_wiki_enabled",
"has_issues_enabled",
"has_projects_enabled",
"client_mutation_id",
)
repository_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="repositoryId"
)
name = sgqlc.types.Field(String, graphql_name="name")
description = sgqlc.types.Field(String, graphql_name="description")
template = sgqlc.types.Field(Boolean, graphql_name="template")
homepage_url = sgqlc.types.Field(URI, graphql_name="homepageUrl")
has_wiki_enabled = sgqlc.types.Field(Boolean, graphql_name="hasWikiEnabled")
has_issues_enabled = sgqlc.types.Field(Boolean, graphql_name="hasIssuesEnabled")
has_projects_enabled = sgqlc.types.Field(Boolean, graphql_name="hasProjectsEnabled")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| UpdateRepositoryInput |
python | PyCQA__pylint | tests/functional/p/postponed/postponed_evaluation_pep585.py | {
"start": 1128,
"end": 1186
} | class ____(TypedDict):
my_var: list[int]
| CustomTypedDict3 |
python | walkccc__LeetCode | solutions/344. Reverse String/344.py | {
"start": 0,
"end": 168
} | class ____:
def reverseString(self, s: list[str]) -> None:
l = 0
r = len(s) - 1
while l < r:
s[l], s[r] = s[r], s[l]
l += 1
r -= 1
| Solution |
python | tiangolo__fastapi | tests/test_dependency_yield_scope.py | {
"start": 781,
"end": 6873
} | class ____:
def __init__(self, name: str = "default") -> None:
self.name = name
self.open = True
def get_named_session(session: SessionRequestDep, session_b: SessionDefaultDep) -> Any:
assert session is session_b
named_session = NamedSession(name="named")
yield named_session, session_b
named_session.open = False
NamedSessionsDep = Annotated[Tuple[NamedSession, Session], Depends(get_named_session)]
def get_named_func_session(session: SessionFuncDep) -> Any:
named_session = NamedSession(name="named")
yield named_session, session
named_session.open = False
def get_named_regular_func_session(session: SessionFuncDep) -> Any:
named_session = NamedSession(name="named")
return named_session, session
BrokenSessionsDep = Annotated[
Tuple[NamedSession, Session], Depends(get_named_func_session)
]
NamedSessionsFuncDep = Annotated[
Tuple[NamedSession, Session], Depends(get_named_func_session, scope="function")
]
RegularSessionsDep = Annotated[
Tuple[NamedSession, Session], Depends(get_named_regular_func_session)
]
app = FastAPI()
router = APIRouter()
@router.get("/")
def get_index():
return {"status": "ok"}
@app.get("/function-scope")
def function_scope(session: SessionFuncDep) -> Any:
def iter_data():
yield json.dumps({"is_open": session.open})
return StreamingResponse(iter_data())
@app.get("/request-scope")
def request_scope(session: SessionRequestDep) -> Any:
def iter_data():
yield json.dumps({"is_open": session.open})
return StreamingResponse(iter_data())
@app.get("/two-scopes")
def get_stream_session(
function_session: SessionFuncDep, request_session: SessionRequestDep
) -> Any:
def iter_data():
yield json.dumps(
{"func_is_open": function_session.open, "req_is_open": request_session.open}
)
return StreamingResponse(iter_data())
@app.get("/sub")
def get_sub(sessions: NamedSessionsDep) -> Any:
def iter_data():
yield json.dumps(
{"named_session_open": sessions[0].open, "session_open": sessions[1].open}
)
return StreamingResponse(iter_data())
@app.get("/named-function-scope")
def get_named_function_scope(sessions: NamedSessionsFuncDep) -> Any:
def iter_data():
yield json.dumps(
{"named_session_open": sessions[0].open, "session_open": sessions[1].open}
)
return StreamingResponse(iter_data())
@app.get("/regular-function-scope")
def get_regular_function_scope(sessions: RegularSessionsDep) -> Any:
def iter_data():
yield json.dumps(
{"named_session_open": sessions[0].open, "session_open": sessions[1].open}
)
return StreamingResponse(iter_data())
app.include_router(
prefix="/router-scope-function",
router=router,
dependencies=[Depends(raise_after_yield, scope="function")],
)
app.include_router(
prefix="/router-scope-request",
router=router,
dependencies=[Depends(raise_after_yield, scope="request")],
)
client = TestClient(app)
def test_function_scope() -> None:
response = client.get("/function-scope")
assert response.status_code == 200
data = response.json()
assert data["is_open"] is False
def test_request_scope() -> None:
response = client.get("/request-scope")
assert response.status_code == 200
data = response.json()
assert data["is_open"] is True
def test_two_scopes() -> None:
response = client.get("/two-scopes")
assert response.status_code == 200
data = response.json()
assert data["func_is_open"] is False
assert data["req_is_open"] is True
def test_sub() -> None:
response = client.get("/sub")
assert response.status_code == 200
data = response.json()
assert data["named_session_open"] is True
assert data["session_open"] is True
def test_broken_scope() -> None:
with pytest.raises(
FastAPIError,
match='The dependency "get_named_func_session" has a scope of "request", it cannot depend on dependencies with scope "function"',
):
@app.get("/broken-scope")
def get_broken(sessions: BrokenSessionsDep) -> Any: # pragma: no cover
pass
def test_named_function_scope() -> None:
response = client.get("/named-function-scope")
assert response.status_code == 200
data = response.json()
assert data["named_session_open"] is False
assert data["session_open"] is False
def test_regular_function_scope() -> None:
response = client.get("/regular-function-scope")
assert response.status_code == 200
data = response.json()
assert data["named_session_open"] is True
assert data["session_open"] is False
def test_router_level_dep_scope_function() -> None:
response = client.get("/router-scope-function/")
assert response.status_code == 503
assert response.json() == {"detail": "Exception after yield"}
def test_router_level_dep_scope_request() -> None:
with TestClient(app, raise_server_exceptions=False) as client:
response = client.get("/router-scope-request/")
assert response.status_code == 200
assert response.json() == {"status": "ok"}
def test_app_level_dep_scope_function() -> None:
app = FastAPI(dependencies=[Depends(raise_after_yield, scope="function")])
@app.get("/app-scope-function")
def get_app_scope_function():
return {"status": "ok"}
with TestClient(app) as client:
response = client.get("/app-scope-function")
assert response.status_code == 503
assert response.json() == {"detail": "Exception after yield"}
def test_app_level_dep_scope_request() -> None:
app = FastAPI(dependencies=[Depends(raise_after_yield, scope="request")])
@app.get("/app-scope-request")
def get_app_scope_request():
return {"status": "ok"}
with TestClient(app, raise_server_exceptions=False) as client:
response = client.get("/app-scope-request")
assert response.status_code == 200
assert response.json() == {"status": "ok"}
| NamedSession |
python | numba__llvmlite | llvmlite/binding/newpassmanagers.py | {
"start": 1647,
"end": 3302
} | class ____(Structure):
_fields_ = [
('basicblock', c_size_t),
('diamond', c_size_t),
('fanout', c_size_t),
('fanout_raise', c_size_t)]
def dump_refprune_stats(printout=False):
""" Returns a namedtuple containing the current values for the refop pruning
statistics. If kwarg `printout` is True the stats are printed to stderr,
default is False.
"""
stats = _c_PruneStats(0, 0, 0, 0)
do_print = c_bool(printout)
ffi.lib.LLVMPY_DumpRefPruneStats(byref(stats), do_print)
return PruneStats(stats.basicblock, stats.diamond, stats.fanout,
stats.fanout_raise)
# TODO: Rename and add tests for these
# Although new pass manager has its own timing APIs, we still need to support
# the legacy ones as LLVM backend still used the LegacyPassManager. These APIs
# will be used to time the backend passes such as instruction selection,
# regalloc, etc
def set_time_passes(enable):
"""Enable or disable the pass timers.
Parameters
----------
enable : bool
Set to True to enable the pass timers.
Set to False to disable the pass timers.
"""
ffi.lib.LLVMPY_SetTimePasses(c_bool(enable))
def report_and_reset_timings():
"""Returns the pass timings report and resets the LLVM internal timers.
Pass timers are enabled by ``set_time_passes()``. If the timers are not
enabled, this function will return an empty string.
Returns
-------
res : str
LLVM generated timing report.
"""
with ffi.OutputString() as buf:
ffi.lib.LLVMPY_ReportAndResetTimings(buf)
return str(buf)
| _c_PruneStats |
python | jina-ai__jina | tests/unit/orchestrate/flow/flow-construct/test_flow.py | {
"start": 7698,
"end": 8060
} | class ____(BaseExecutor):
"""Class used in Flow YAML"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# pod/pod-specific
assert os.environ['key1'] == 'value1'
assert os.environ['key2'] == 'value2'
# inherit from parent process
assert os.environ['key_parent'] == 'value3'
| EnvChecker1 |
python | dagster-io__dagster | python_modules/libraries/dagster-databricks/dagster_databricks/pipes.py | {
"start": 23171,
"end": 30860
} | class ____(BasePipesDatabricksClient, TreatAsResourceParam):
"""Pipes client for Databricks Serverless.
Args:
client (WorkspaceClient): A databricks `WorkspaceClient` object.
volume_path (str): Path to the volume that will be used by this client to read and write temporary files.
context_injector (Optional[PipesContextInjector]): A context injector to use to inject
context into the k8s container process. Defaults to :py:class:`PipesUnityCatalogVolumesContextInjector`.
message_reader (Optional[PipesMessageReader]): A message reader to use to read messages
from the Databricks Serverless job. Defaults to :py:class:`PipesUnityCatalogVolumesMessageReader`.
poll_interval_seconds (float): How long to sleep between checking the status of the job run.
Defaults to 5.
forward_termination (bool): Whether to cancel the Databricks Serverless job if the orchestration process
is interrupted or canceled. Defaults to True.
"""
def __init__(
self,
client: WorkspaceClient,
volume_path: str,
context_injector: Optional[PipesContextInjector] = None,
message_reader: Optional[PipesMessageReader] = None,
poll_interval_seconds: float = 5,
forward_termination: bool = True,
):
self.volume_path = volume_path
super().__init__(
client=client,
context_injector=context_injector,
message_reader=message_reader,
poll_interval_seconds=poll_interval_seconds,
forward_termination=forward_termination,
)
@classmethod
def _is_dagster_maintained(cls) -> bool:
return True
def run( # pyright: ignore[reportIncompatibleMethodOverride]
self,
*,
context: Union[OpExecutionContext, AssetExecutionContext],
extras: Optional[PipesExtras] = None,
task: jobs.SubmitTask,
submit_args: Optional[Mapping[str, Any]] = None,
) -> PipesClientCompletedInvocation:
"""Synchronously execute a Databricks job with the pipes protocol.
Args:
task (databricks.sdk.service.jobs.SubmitTask): Specification of the Databricks Serverless
task to run.
Field `task_key` must be provided. One of the following task fields must be provided: `notebook_task`,
`spark_python_task` or `python_wheel_task`. For Python tasks, field `environment_key` must be provided.
Note that when `environment_key` is provided, `environments` must be passed
in the `submit_args` of this method.
context (Union[OpExecutionContext, AssetExecutionContext]): The context from the executing op or asset.
extras (Optional[PipesExtras]): An optional dict of extra parameters to pass to the
subprocess.
submit_args (Optional[Mapping[str, Any]]): Additional keyword arguments that will be
forwarded as-is to `WorkspaceClient.jobs.submit`.
Returns:
PipesClientCompletedInvocation: Wrapper containing results reported by the external
process.
"""
context_injector = self.context_injector or PipesUnityCatalogVolumesContextInjector(
client=self.client, volume_path=self.volume_path
)
message_reader = self.message_reader or PipesUnityCatalogVolumesMessageReader(
client=self.client,
volume_path=self.volume_path,
)
with open_pipes_session(
context=context,
extras=extras,
context_injector=context_injector,
message_reader=message_reader,
) as pipes_session:
submit_task_dict = task.as_dict()
submit_task_dict = self._enrich_submit_task_dict(
context=context, session=pipes_session, submit_task_dict=submit_task_dict
)
submit_task = jobs.SubmitTask.from_dict(submit_task_dict)
run_id = self.client.jobs.submit(
tasks=[submit_task],
**(submit_args or {}),
).bind()["run_id"]
try:
self._poll_til_success(context, run_id)
except DagsterExecutionInterruptedError:
if self.forward_termination:
context.log.info("[pipes] execution interrupted, canceling Databricks job.")
self.client.jobs.cancel_run(run_id)
self._poll_til_terminating(run_id)
return PipesClientCompletedInvocation(
pipes_session, metadata=self._extract_dagster_metadata(run_id)
)
def _enrich_submit_task_dict(
self,
context: Union[OpExecutionContext, AssetExecutionContext],
session: PipesSession,
submit_task_dict: dict[str, Any],
) -> dict[str, Any]:
if "notebook_task" in submit_task_dict:
existing_params = submit_task_dict["notebook_task"].get("base_parameters", {})
# merge the existing parameters with the CLI arguments
existing_params = {**existing_params, **session.get_bootstrap_env_vars()}
submit_task_dict["notebook_task"]["base_parameters"] = existing_params
else:
cli_args = session.get_bootstrap_cli_arguments() # this is a mapping
for task_type in self.get_task_fields_which_support_cli_parameters():
if task_type in submit_task_dict:
existing_params = submit_task_dict[task_type].get("parameters", [])
# merge the existing parameters with the CLI arguments
for key, value in cli_args.items():
existing_params.extend([key, value])
submit_task_dict[task_type]["parameters"] = existing_params
context.log.debug(
f"Passing Pipes bootstrap parameters "
f'via Databricks parameters as "{key}.parameters". ' # pyright: ignore[reportPossiblyUnboundVariable]
f"Make sure to use the PipesCliArgsParamsLoader in the task."
)
break
submit_task_dict["tags"] = {
**submit_task_dict.get("tags", {}),
**session.default_remote_invocation_info,
}
return submit_task_dict
@contextmanager
def volumes_tempdir(files_client: files.FilesAPI, volume_path: str) -> Iterator[str]:
dirname = "".join(random.choices(string.ascii_letters, k=30))
tempdir = f"{volume_path}/tmp/{dirname}"
files_client.create_directory(tempdir)
try:
yield tempdir
finally:
delete_volume_directory_recursive(files_client, tempdir)
pass
def delete_volume_directory_recursive(volumes_client: files.FilesAPI, directory_path: str) -> None:
"""Recursively delete a directory and all its contents in Unity Catalog Volumes.
Args:
volumes_client: The Files API client
directory_path: Path to directory
"""
# List all contents in the directory
contents = list(volumes_client.list_directory_contents(directory_path))
# Delete each item recursively
for item in contents:
item_path = f"{directory_path.rstrip('/')}/{item.name}"
if item.is_directory:
# Recursively delete subdirectory
delete_volume_directory_recursive(volumes_client, item_path)
else:
volumes_client.delete(item_path)
# Finally delete the empty directory
volumes_client.delete_directory(directory_path)
| PipesDatabricksServerlessClient |
python | huggingface__transformers | src/transformers/models/squeezebert/modeling_squeezebert.py | {
"start": 15836,
"end": 19122
} | class ____(SqueezeBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = SqueezeBertEmbeddings(config)
self.encoder = SqueezeBertEncoder(config)
self.pooler = SqueezeBertPooler(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings.word_embeddings = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPooling]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(
hidden_states=embedding_output,
attention_mask=extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@auto_docstring
| SqueezeBertModel |
python | walkccc__LeetCode | solutions/1246. Palindrome Removal/1246.py | {
"start": 0,
"end": 717
} | class ____:
def minimumMoves(self, arr: list[int]) -> int:
n = len(arr)
# dp[i][j] := the minimum number of moves to remove all numbers from arr[i..j]
dp = [[n] * n for _ in range(n)]
for i in range(n):
dp[i][i] = 1
for i in range(n - 1):
dp[i][i + 1] = 1 if arr[i] == arr[i + 1] else 2
for d in range(2, n):
for i in range(n - d):
j = i + d
# Remove arr[i] and arr[j] within the move of removing
# arr[i + 1..j - 1]
if arr[i] == arr[j]:
dp[i][j] = dp[i + 1][j - 1]
# Try all the possible partitions.
for k in range(i, j):
dp[i][j] = min(dp[i][j], dp[i][k] + dp[k + 1][j])
return dp[0][n - 1]
| Solution |
python | streamlit__streamlit | lib/streamlit/watcher/event_based_path_watcher.py | {
"start": 2541,
"end": 4452
} | class ____:
"""Watches a single path on disk using watchdog."""
@staticmethod
def close_all() -> None:
"""Close the _MultiPathWatcher singleton."""
path_watcher = _MultiPathWatcher.get_singleton()
path_watcher.close()
_LOGGER.debug("Watcher closed")
def __init__(
self,
path: str,
on_changed: Callable[[str], None],
*, # keyword-only arguments:
glob_pattern: str | None = None,
allow_nonexistent: bool = False,
) -> None:
"""Constructor for EventBasedPathWatchers.
Parameters
----------
path : str
The path to watch.
on_changed : Callable[[str], None]
Callback to call when the path changes.
glob_pattern : str or None
A glob pattern to filter the files in a directory that should be
watched. Only relevant when creating an EventBasedPathWatcher on a
directory.
allow_nonexistent : bool
If True, the watcher will not raise an exception if the path does
not exist. This can be used to watch for the creation of a file or
directory at a given path.
"""
self._path = os.path.realpath(path)
self._on_changed = on_changed
path_watcher = _MultiPathWatcher.get_singleton()
path_watcher.watch_path(
self._path,
on_changed,
glob_pattern=glob_pattern,
allow_nonexistent=allow_nonexistent,
)
_LOGGER.debug("Watcher created for %s", self._path)
def __repr__(self) -> str:
return repr_(self)
def close(self) -> None:
"""Stop watching the path corresponding to this EventBasedPathWatcher."""
path_watcher = _MultiPathWatcher.get_singleton()
path_watcher.stop_watching_path(self._path, self._on_changed)
| EventBasedPathWatcher |
python | ray-project__ray | python/ray/data/tests/test_webdataset.py | {
"start": 217,
"end": 9067
} | class ____:
def __init__(self, path):
self.path = path
self.tar = tarfile.open(path, "w")
def __enter__(self):
return self
def __exit__(self, *args):
self.tar.close()
def write(self, name, data):
f = self.tar.tarinfo()
f.name = name
f.size = len(data)
self.tar.addfile(f, io.BytesIO(data))
def test_webdataset_read(ray_start_2_cpus, tmp_path):
path = os.path.join(tmp_path, "bar_000000.tar")
with TarWriter(path) as tf:
for i in range(100):
tf.write(f"{i}.a", str(i).encode("utf-8"))
tf.write(f"{i}.b", str(i**2).encode("utf-8"))
assert os.path.exists(path)
assert len(glob.glob(f"{tmp_path}/*.tar")) == 1
ds = ray.data.read_webdataset(paths=[str(tmp_path)])
samples = ds.take(100)
assert len(samples) == 100
for i, sample in enumerate(samples):
assert isinstance(sample, dict), sample
assert sample["__key__"] == str(i)
assert sample["a"].decode("utf-8") == str(i)
assert sample["b"].decode("utf-8") == str(i**2)
def test_webdataset_expand_json(ray_start_2_cpus, tmp_path):
import numpy as np
import torch
image = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
gray = np.random.randint(0, 255, (100, 100), dtype=np.uint8)
dstruct = dict(a=[1, 2], b=dict(c=2), d="hello")
ttensor = torch.tensor([1, 2, 3]).numpy()
sample = {
"__key__": "foo",
"jpg": image,
"gray.png": gray,
"mp": dstruct,
"json": dstruct,
"pt": ttensor,
"und": b"undecoded",
"custom": b"nothing",
}
# write the encoded data using the default encoder
data = [sample]
ds = ray.data.from_items(data).repartition(1)
ds.write_webdataset(path=tmp_path, try_create_dir=True)
ds = ray.data.read_webdataset(
paths=[str(tmp_path)], override_num_blocks=1, expand_json=True
)
record = ds.take(1)
assert [1, 2] == record[0]["a"]
def test_webdataset_suffixes(ray_start_2_cpus, tmp_path):
path = os.path.join(tmp_path, "bar_000000.tar")
with TarWriter(path) as tf:
for i in range(100):
tf.write(f"{i}.txt", str(i).encode("utf-8"))
tf.write(f"{i}.test.txt", str(i**2).encode("utf-8"))
tf.write(f"{i}.cls", str(i**2).encode("utf-8"))
tf.write(f"{i}.test.cls2", str(i**2).encode("utf-8"))
assert os.path.exists(path)
assert len(glob.glob(f"{tmp_path}/*.tar")) == 1
# test simple suffixes
ds = ray.data.read_webdataset(paths=[str(tmp_path)], suffixes=["txt", "cls"])
samples = ds.take(100)
assert len(samples) == 100
for i, sample in enumerate(samples):
assert set(sample.keys()) == {"__url__", "__key__", "txt", "cls"}
# test fnmatch patterns for suffixes
ds = ray.data.read_webdataset(paths=[str(tmp_path)], suffixes=["*.txt", "*.cls"])
samples = ds.take(100)
assert len(samples) == 100
for i, sample in enumerate(samples):
assert set(sample.keys()) == {"__url__", "__key__", "txt", "cls", "test.txt"}
# test selection function
def select(name):
return name.endswith("txt")
ds = ray.data.read_webdataset(paths=[str(tmp_path)], suffixes=select)
samples = ds.take(100)
assert len(samples) == 100
for i, sample in enumerate(samples):
assert set(sample.keys()) == {"__url__", "__key__", "txt", "test.txt"}
# test filerename
def renamer(name):
result = name.replace("txt", "text")
print("***", name, result)
return result
ds = ray.data.read_webdataset(paths=[str(tmp_path)], filerename=renamer)
samples = ds.take(100)
assert len(samples) == 100
for i, sample in enumerate(samples):
assert set(sample.keys()) == {
"__url__",
"__key__",
"text",
"cls",
"test.text",
"test.cls2",
}
def test_webdataset_write(ray_start_2_cpus, tmp_path):
print(ray.available_resources())
data = [dict(__key__=str(i), a=str(i), b=str(i**2)) for i in range(100)]
ds = ray.data.from_items(data).repartition(1)
ds.write_webdataset(path=tmp_path, try_create_dir=True)
paths = glob.glob(f"{tmp_path}/*.tar")
assert len(paths) == 1
with open(paths[0], "rb") as stream:
tf = tarfile.open(fileobj=stream)
for i in range(100):
assert tf.extractfile(f"{i}.a").read().decode("utf-8") == str(i)
assert tf.extractfile(f"{i}.b").read().decode("utf-8") == str(i**2)
def custom_decoder(sample):
for key, value in sample.items():
if key == "png":
# check that images have already been decoded
assert not isinstance(value, bytes)
elif key.endswith("custom"):
sample[key] = "custom-value"
return sample
def test_webdataset_coding(ray_start_2_cpus, tmp_path):
import numpy as np
import PIL.Image
import torch
image = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
gray = np.random.randint(0, 255, (100, 100), dtype=np.uint8)
dstruct = dict(a=[1], b=dict(c=2), d="hello")
ttensor = torch.tensor([1, 2, 3]).numpy()
sample = {
"__key__": "foo",
"jpg": image,
"gray.png": gray,
"mp": dstruct,
"json": dstruct,
"pt": ttensor,
"und": b"undecoded",
"custom": b"nothing",
}
# write the encoded data using the default encoder
data = [sample]
ds = ray.data.from_items(data).repartition(1)
ds.write_webdataset(path=tmp_path, try_create_dir=True)
# read the encoded data using the default decoder
paths = glob.glob(f"{tmp_path}/*.tar")
assert len(paths) == 1
path = paths[0]
assert os.path.exists(path)
ds = ray.data.read_webdataset(paths=[str(tmp_path)])
samples = ds.take(1)
assert len(samples) == 1
for sample in samples:
assert isinstance(sample, dict), sample
assert sample["__key__"] == "foo"
assert isinstance(sample["jpg"], np.ndarray)
assert sample["jpg"].shape == (100, 100, 3)
assert isinstance(sample["gray.png"], np.ndarray)
assert sample["gray.png"].shape == (100, 100)
assert isinstance(sample["mp"], dict)
assert sample["mp"]["a"] == [1]
assert sample["mp"]["b"]["c"] == 2
assert isinstance(sample["json"], dict)
assert sample["json"]["a"] == [1]
assert isinstance(sample["pt"], np.ndarray)
assert sample["pt"].tolist() == [1, 2, 3]
# test the format argument to the default decoder and multiple decoders
ds = ray.data.read_webdataset(
paths=[str(tmp_path)], decoder=["PIL", custom_decoder]
)
samples = ds.take(1)
assert len(samples) == 1
for sample in samples:
assert isinstance(sample, dict), sample
assert sample["__key__"] == "foo"
assert isinstance(sample["jpg"], PIL.Image.Image)
assert isinstance(sample["gray.png"], PIL.Image.Image)
assert isinstance(sample["und"], bytes)
assert sample["und"] == b"undecoded"
assert sample["custom"] == "custom-value"
def test_webdataset_decoding(ray_start_2_cpus, tmp_path):
import numpy as np
import torch
image = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)
gray = np.random.randint(0, 255, (100, 100), dtype=np.uint8)
dstruct = dict(a=np.nan, b=dict(c=2), d="hello", e={"img_filename": "for_test.jpg"})
ttensor = torch.tensor([1, 2, 3]).numpy()
sample = {
"__key__": "foo",
"jpg": image,
"gray.png": gray,
"mp": dstruct,
"json": dstruct,
"pt": ttensor,
"und": b"undecoded",
"custom": b"nothing",
}
# write the encoded data using the default encoder
data = [sample]
ds = ray.data.from_items(data).repartition(1)
ds.write_webdataset(path=tmp_path, try_create_dir=True)
ds = ray.data.read_webdataset(
paths=[str(tmp_path)],
override_num_blocks=1,
decoder=None,
)
samples = ds.take(1)
import json
meta_json = json.loads(samples[0]["json"].decode("utf-8"))
assert meta_json["e"]["img_filename"] == "for_test.jpg"
@pytest.mark.parametrize("min_rows_per_file", [5, 10, 50])
def test_write_min_rows_per_file(tmp_path, ray_start_regular_shared, min_rows_per_file):
ray.data.from_items(
[{"id": str(i)} for i in range(100)], override_num_blocks=20
).write_webdataset(tmp_path, min_rows_per_file=min_rows_per_file)
for filename in os.listdir(tmp_path):
dataset = wds.WebDataset(os.path.join(tmp_path, filename))
assert len(list(dataset)) == min_rows_per_file
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| TarWriter |
python | django__django | tests/model_forms/tests.py | {
"start": 32019,
"end": 32351
} | class ____(forms.ModelForm):
"""
A form that replaces the model's url field with a custom one. This should
prevent the model field's validation from being called.
"""
url = forms.CharField(required=False)
class Meta:
fields = ("name", "slug")
model = Category
| IncompleteCategoryFormWithFields |
python | openai__openai-python | src/openai/resources/beta/chatkit/threads.py | {
"start": 18502,
"end": 19067
} | class ____:
def __init__(self, threads: AsyncThreads) -> None:
self._threads = threads
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
threads.retrieve,
)
self.list = _legacy_response.async_to_raw_response_wrapper(
threads.list,
)
self.delete = _legacy_response.async_to_raw_response_wrapper(
threads.delete,
)
self.list_items = _legacy_response.async_to_raw_response_wrapper(
threads.list_items,
)
| AsyncThreadsWithRawResponse |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_health/asset_materialization_health.py | {
"start": 14489,
"end": 14678
} | class ____:
num_failed_partitions: int
num_missing_partitions: int
total_num_partitions: int
@whitelist_for_serdes
@record.record
| AssetHealthMaterializationDegradedPartitionedMeta |
python | pyca__cryptography | src/cryptography/hazmat/primitives/serialization/ssh.py | {
"start": 6758,
"end": 8404
} | class ____:
"""Build recursive structure without data copy."""
flist: list[utils.Buffer]
def __init__(self, init: list[utils.Buffer] | None = None) -> None:
self.flist = []
if init:
self.flist.extend(init)
def put_raw(self, val: utils.Buffer) -> None:
"""Add plain bytes"""
self.flist.append(val)
def put_u32(self, val: int) -> None:
"""Big-endian uint32"""
self.flist.append(val.to_bytes(length=4, byteorder="big"))
def put_u64(self, val: int) -> None:
"""Big-endian uint64"""
self.flist.append(val.to_bytes(length=8, byteorder="big"))
def put_sshstr(self, val: bytes | _FragList) -> None:
"""Bytes prefixed with u32 length"""
if isinstance(val, (bytes, memoryview, bytearray)):
self.put_u32(len(val))
self.flist.append(val)
else:
self.put_u32(val.size())
self.flist.extend(val.flist)
def put_mpint(self, val: int) -> None:
"""Big-endian bigint prefixed with u32 length"""
self.put_sshstr(_to_mpint(val))
def size(self) -> int:
"""Current number of bytes"""
return sum(map(len, self.flist))
def render(self, dstbuf: memoryview, pos: int = 0) -> int:
"""Write into bytearray"""
for frag in self.flist:
flen = len(frag)
start, pos = pos, pos + flen
dstbuf[start:pos] = frag
return pos
def tobytes(self) -> bytes:
"""Return as bytes"""
buf = memoryview(bytearray(self.size()))
self.render(buf)
return buf.tobytes()
| _FragList |
python | weaviate__weaviate-python-client | weaviate/collections/classes/generative.py | {
"start": 7290,
"end": 7772
} | class ____(_GenerativeConfigRuntime):
generative: Union[GenerativeSearches, _EnumLikeStr] = Field(
default=GenerativeSearches.DUMMY, frozen=True, exclude=True
)
def _to_grpc(self, opts: _GenerativeConfigRuntimeOptions) -> generative_pb2.GenerativeProvider:
self._validate_multi_modal(opts)
return generative_pb2.GenerativeProvider(
return_metadata=opts.return_metadata, dummy=generative_pb2.GenerativeDummy()
)
| _GenerativeDummy |
python | aio-libs__aiohttp | aiohttp/streams.py | {
"start": 506,
"end": 994
} | class ____(Generic[_T]):
__slots__ = ("read_func",)
def __init__(self, read_func: Callable[[], Awaitable[_T]]) -> None:
self.read_func = read_func
def __aiter__(self) -> "AsyncStreamIterator[_T]":
return self
async def __anext__(self) -> _T:
try:
rv = await self.read_func()
except EofStream:
raise StopAsyncIteration
if rv == b"":
raise StopAsyncIteration
return rv
| AsyncStreamIterator |
python | django__django | django/db/utils.py | {
"start": 3986,
"end": 6515
} | class ____(BaseConnectionHandler):
settings_name = "DATABASES"
# Connections needs to still be an actual thread local, as it's truly
# thread-critical. Database backends should use @async_unsafe to protect
# their code from async contexts, but this will give those contexts
# separate connections in case it's needed as well. There's no cleanup
# after async contexts, though, so we don't allow that if we can help it.
thread_critical = True
def configure_settings(self, databases):
databases = super().configure_settings(databases)
if databases == {}:
databases[DEFAULT_DB_ALIAS] = {"ENGINE": "django.db.backends.dummy"}
elif DEFAULT_DB_ALIAS not in databases:
raise ImproperlyConfigured(
f"You must define a '{DEFAULT_DB_ALIAS}' database."
)
elif databases[DEFAULT_DB_ALIAS] == {}:
databases[DEFAULT_DB_ALIAS]["ENGINE"] = "django.db.backends.dummy"
# Configure default settings.
for conn in databases.values():
conn.setdefault("ATOMIC_REQUESTS", False)
conn.setdefault("AUTOCOMMIT", True)
conn.setdefault("ENGINE", "django.db.backends.dummy")
if conn["ENGINE"] == "django.db.backends." or not conn["ENGINE"]:
conn["ENGINE"] = "django.db.backends.dummy"
conn.setdefault("CONN_MAX_AGE", 0)
conn.setdefault("CONN_HEALTH_CHECKS", False)
conn.setdefault("OPTIONS", {})
conn.setdefault("TIME_ZONE", None)
for setting in ["NAME", "USER", "PASSWORD", "HOST", "PORT"]:
conn.setdefault(setting, "")
test_settings = conn.setdefault("TEST", {})
default_test_settings = [
("CHARSET", None),
("COLLATION", None),
("MIGRATE", True),
("MIRROR", None),
("NAME", None),
]
for key, value in default_test_settings:
test_settings.setdefault(key, value)
return databases
@property
def databases(self):
# Maintained for backward compatibility as some 3rd party packages have
# made use of this private API in the past. It is no longer used within
# Django itself.
return self.settings
def create_connection(self, alias):
db = self.settings[alias]
backend = load_backend(db["ENGINE"])
return backend.DatabaseWrapper(db, alias)
| ConnectionHandler |
python | PrefectHQ__prefect | tests/results/test_result_record.py | {
"start": 315,
"end": 2349
} | class ____:
def test_deserialize_with_full_data(self):
record = ResultRecord(
result="The results are in...",
metadata=ResultRecordMetadata(
storage_key="my-storage-key", serializer=JSONSerializer()
),
)
serialized = record.serialize()
deserialized = ResultRecord.deserialize(serialized)
assert deserialized.result == "The results are in..."
def test_deserialize_with_result_only(self):
serialized = JSONSerializer().dumps("The results are in...")
with pytest.raises(ValidationError):
ResultRecord.deserialize(serialized)
# need to pass serializer to deserialize raw result
deserialized = ResultRecord.deserialize(
serialized, backup_serializer=JSONSerializer()
)
assert deserialized.result == "The results are in..."
async def test_from_metadata(self):
store = ResultStore()
result_record = store.create_result_record("The results are in...", "the-key")
await store.apersist_result_record(result_record)
loaded = await ResultStore._from_metadata(result_record.metadata)
assert loaded.result == "The results are in..."
async def test_from_metadata_with_raw_result(self):
# set up result store to persist a raw result with no metadata
store = ResultStore(
metadata_storage=NullFileSystem(), serializer=JSONSerializer()
)
result_record = store.create_result_record("The results are in...", "the-key")
await store.apersist_result_record(result_record)
loaded = await ResultStore._from_metadata(result_record.metadata)
assert loaded.result == "The results are in..."
# assert that the raw result was persisted without metadata
assert (
JSONSerializer().loads(
(PREFECT_LOCAL_STORAGE_PATH.value() / "the-key").read_bytes()
)
== "The results are in..."
)
| TestResultRecord |
python | run-llama__llama_index | llama-index-experimental/llama_index/experimental/retrievers/natural_language/nl_csv_retriever.py | {
"start": 409,
"end": 1092
} | class ____(NLDataframeRetriever):
def __init__(
self,
csv_path: str,
llm: llm,
name: Optional[str] = None,
text_to_sql_prompt: Optional[BasePromptTemplate] = None,
similarity_top_k: int = DEFAULT_SIMILARITY_TOP_K,
callback_manager: Optional[CallbackManager] = None,
verbose: bool = False,
):
df = pd.read_csv(csv_path)
super().__init__(
df=df,
llm=llm,
text_to_sql_prompt=text_to_sql_prompt,
similarity_top_k=similarity_top_k,
verbose=verbose,
name=name,
callback_manager=callback_manager,
)
| NLCSVRetriever |
python | pytorch__pytorch | test/dynamo/test_export.py | {
"start": 154074,
"end": 156906
} | class ____(torch._dynamo.test_case.TestCase):
def test_export_with_parameters(self, device):
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.features = torch.nn.Sequential(
torch.nn.Conv2d(
3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)
),
torch.nn.ReLU(inplace=True),
)
def forward(self, x):
return self.features(x)
model = MyModule().eval().to(device)
random_inputs = (torch.rand([32, 3, 32, 32]).to(device),)
dim_x = torch.export.Dim("dim_x", min=1, max=32)
exp_program = torch.export.export(
model, random_inputs, dynamic_shapes={"x": {0: dim_x}}, strict=True
)
output_buffer = io.BytesIO()
# Tests if we can restore saved nn.Parameters when we load them again
torch.export.save(exp_program, output_buffer)
loaded_model = torch.export.load(output_buffer)
self.assertTrue(
isinstance(
loaded_model.module().get_parameter("features.0.weight"),
torch.nn.Parameter,
)
)
def test_export_fast_binary_broadcast_check(self, device):
# This test looks at the case where we erroneously create a guard
# when checking the equality of the operands' shape and the output
# shape during FakeTensor's binary op fast path.
class MyModel(torch.nn.Module):
def forward(self, a, b):
# final shape is (dim0, 4, 8)
# order matters since a & the output have the same shape
return b + a
a = torch.randn(100, 4, 8)
b = torch.randn(4, 8)
model = MyModel().eval().to(device)
batchsize = torch.export.Dim("dim0", min=3, max=1024)
dynamic_shape_spec = {"a": [batchsize, None, None], "b": [None, None]}
torch.export.export(
model, (a, b), dynamic_shapes=dynamic_shape_spec, strict=True
)
def test_export_fast_binary_broadcast_check_unbacked(self, device):
class MyModel(torch.nn.Module):
def forward(self, numel, scalar):
u0 = numel.item()
x = torch.ones(u0 + 1)
return scalar - x
model = MyModel().eval().to(device)
numel = torch.tensor(10)
scalar = torch.randn(1)
torch.export.export(model, (numel, scalar), strict=True)
common_utils.instantiate_parametrized_tests(ExportTests)
devices = ["cuda", "hpu"]
instantiate_device_type_tests(ExportTestsDevice, globals(), only_for=devices)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| ExportTestsDevice |
python | huggingface__transformers | src/transformers/models/esm/modeling_esm.py | {
"start": 15664,
"end": 16646
} | class ____(nn.Module):
def __init__(self, config, layer_idx=None, is_cross_attention=False):
super().__init__()
self.self = EsmSelfAttention(config, layer_idx=layer_idx, is_cross_attention=is_cross_attention)
self.output = EsmSelfOutput(config)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
**kwargs: Unpack[TransformersKwargs],
):
hidden_states_ln = self.LayerNorm(hidden_states)
attn_output, _ = self.self(
hidden_states_ln,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
**kwargs,
)
attn_output = self.output(attn_output, hidden_states)
return attn_output
| EsmAttention |
python | google__jax | jax/experimental/mosaic/gpu/utils.py | {
"start": 17162,
"end": 31306
} | class ____:
base: ir.Value | int
length: int
def __post_init__(self):
if isinstance(self.base, int) and self.base < 0:
raise ValueError(f"base must be non-negative, got {self.base}")
if self.length < 0:
raise ValueError(f"length must be non-negative, got {self.length}")
ds = DynamicSlice
def memref_slice(ref: ir.Value, index) -> ir.Value:
ref_ty = ir.MemRefType(ref.type)
base_indices, slice_shape, is_squeezed = parse_indices(index, ref_ty.shape)
# TODO(apaszke): Check that slice is within the memref (indices might be
# dynamic, but we can at least catch some OOB slices).
memref_strides, offset = ref_ty.get_strides_and_offset()
dynamic_offset = ir.ShapedType.get_dynamic_stride_or_offset()
new_offset = offset
if new_offset != dynamic_offset:
for idx, stride in zip(base_indices, memref_strides):
if isinstance(idx, int):
new_offset += idx * stride
else:
new_offset = dynamic_offset
break
new_strides = [
s for s, squeeze in zip(memref_strides, is_squeezed) if not squeeze
]
new_shape = [s for s, squeeze in zip(slice_shape, is_squeezed) if not squeeze]
new_layout = ir.StridedLayoutAttr.get(new_offset, new_strides)
ref_slice = memref.subview(
ref,
base_indices,
slice_shape,
[1] * len(ref_ty.shape),
result_type=ir.MemRefType.get(
new_shape, ref_ty.element_type, new_layout, ref_ty.memory_space
),
)
return ref_slice
def _is_contiguous_shape_slice(
ref_ty: ir.MemRefType, dim_slice: slice | None = slice(None)
):
# If it's not a strided layout then we are definitely contiguous.
if not ir.StridedLayoutAttr.isinstance(ref_ty.layout):
return True
strides = ir.StridedLayoutAttr(ref_ty.layout).strides[dim_slice]
shape = ref_ty.shape[dim_slice]
# Check that each dimension fits exactly it the immediately larger stride.
ss = sorted(zip(strides, shape), key=lambda x: x[0], reverse=True)
for (prev_stride, _), (stride, shape) in zip(ss, ss[1:]):
if stride * shape != prev_stride:
return False
return True
def _reshape(ref: ir.Value, sh0: list[int], sh1: list[int]):
"""Reshapes using only "parallel" folds/unfolds.
This function uses folds/unfolds that are "parallel" in that they
only act on original dimensions, i.e. they won't fold into an
intermediate dimension that they will then unfold.
"""
i0, i1 = 0, 0
def fold_until(shape, off, target) -> tuple[int, int]:
assert shape[off] < target
dim = 1
for to in range(off, len(shape)):
dim *= shape[to]
if dim == target:
return to + 1, dim
if dim > target:
# TODO(cperivol): Implement dependent fold-unfolds for subsections
# of the shape eg (..., 4,5,5, ...) -> (..., 10,10, ...) could be
# supported without touching any other dimensions.
raise NotImplementedError(
f"Can't reshape {sh0} to {sh1} by composing independent"
" folds/unfolds."
)
raise AssertionError(
f"Unreachable: number of elements don't match in each shape ({sh0} ans"
f" {sh1})"
)
while i0 < len(sh0) and i1 < len(sh1):
if sh0[i0] > sh1[i1]:
# How many dimensions following i1 should we unfold i0 into.
idx, _ = fold_until(sh1, i1, sh0[i0])
ref = memref_unfold(ref, i0, sh1[i1:idx])
sh0[i0 : i0 + 1] = sh1[i1:idx]
i0 += idx - i1
i1 = idx
elif sh0[i0] < sh1[i1]:
# How many dimensions after i0 should we fold to make dim at i1.
idx, dim = fold_until(sh0, i0, sh1[i1])
sh0[i0:idx] = [dim]
ref = memref_fold(ref, i0, idx - i0)
i0 += 1
i1 += 1
else:
i0 += 1
i1 += 1
# Fold the trailing ones
if i0 < len(sh0):
assert i1 == len(sh1)
ref = memref_fold(ref, i0 - 1, len(sh0) - i0 + 1)
if i1 < len(sh1):
assert i0 == len(sh0)
ref = memref_unfold(ref, i0 - 1, [sh0[i0 - 1]] + [1] * (len(sh1) - i1))
return ref
def memref_reshape(
ref: ir.Value | MultimemRef, shape: tuple[int, ...]
) -> ir.Value | MultimemRef:
"""Reshape by means of folding and unfolding.
The use of memref fold/unfold may avoid some possible issues with
strided memrefs.
"""
if isinstance(ref, MultimemRef):
return MultimemRef(memref_reshape(ref.ref, shape))
ref_ty = ir.MemRefType(ref.type)
if math.prod(ref_ty.shape) != math.prod(shape):
raise ValueError(
f"Cannot reshape to a different size. Ref shape: {ref_ty.shape} (size:"
f" {math.prod(ref_ty.shape)}), new shape: {shape} (size:"
f" {math.prod(shape)})"
)
if not all(dim > 0 for dim in shape):
raise ValueError(
"Shapes must havbe only positive dimensions (no -1 or 0 dimensions"
f" allowed) {shape}"
)
src_shape = list(ref_ty.shape)
dst_shape = list(shape)
if src_shape == dst_shape:
return ref
if not src_shape:
_, offset = ref_ty.get_strides_and_offset()
identity = ir.AffineMapAttr.get(ir.AffineMap.get_identity(0))
if ref_ty.layout == identity:
new_layout = ir.AffineMapAttr.get(
ir.AffineMap.get_identity(len(dst_shape))
)
else:
new_layout = ir.StridedLayoutAttr.get(offset, [1] * len(dst_shape))
result_ty = ir.MemRefType.get(
dst_shape, ref_ty.element_type, new_layout, ref_ty.memory_space
)
return memref.expand_shape(result_ty, ref, [], [], dst_shape)
if not dst_shape:
_, offset = ref_ty.get_strides_and_offset()
identity = ir.AffineMapAttr.get(ir.AffineMap.get_identity(ref_ty.rank))
contig_strided_1d = ir.Attribute.parse("strided<[1]>")
if ref_ty.layout == identity or ref_ty.layout == contig_strided_1d:
new_layout = ir.AffineMapAttr.get(ir.AffineMap.get_identity(0))
else:
new_layout = ir.StridedLayoutAttr.get(offset, [])
result_ty = ir.MemRefType.get(
(), ref_ty.element_type, new_layout, ref_ty.memory_space
)
return memref.collapse_shape(result_ty, ref, [])
return _reshape(ref, src_shape, dst_shape)
def memref_fold(
ref: ir.Value | MultimemRef, dim, fold_rank
) -> ir.Value | MultimemRef:
if isinstance(ref, MultimemRef):
return MultimemRef(memref_fold(ref.ref, dim, fold_rank))
ref_ty = ir.MemRefType(ref.type)
new_shape = list(ref_ty.shape)
if dim < 0:
raise ValueError(f"Dimension {dim} is negative")
if dim + fold_rank > len(new_shape):
raise ValueError(
f"Folding {fold_rank} dimensions starting from {dim} is out of bounds"
f" for shape {new_shape}"
)
new_shape[dim : dim + fold_rank] = [np.prod(new_shape[dim : dim + fold_rank])]
identity = ir.AffineMapAttr.get(ir.AffineMap.get_identity(ref_ty.rank))
contig_strided_1d = ir.Attribute.parse("strided<[1]>")
# Not sure why but MLIR expects the strided 1D layout to disappear in this op.
if ref_ty.layout == identity or ref_ty.layout == contig_strided_1d:
new_layout = ir.AffineMapAttr.get(
ir.AffineMap.get_identity(ref_ty.rank - fold_rank + 1)
)
elif _is_contiguous_shape_slice(ref_ty, slice(dim, dim + fold_rank)):
new_strides, offset = ref_ty.get_strides_and_offset()
new_strides[dim : dim + fold_rank] = [new_strides[dim + fold_rank - 1]]
new_layout = ir.StridedLayoutAttr.get(offset, new_strides)
else:
raise ValueError(
f"strides={ref_ty.get_strides_and_offset()[0]}, {ref_ty.shape=},"
f" {dim=}, {fold_rank=}"
)
new_ty = ir.MemRefType.get(
new_shape, ref_ty.element_type, new_layout, ref_ty.memory_space
)
assoc = [[d] for d in range(dim)]
assoc.append([dim + i for i in range(fold_rank)])
assoc.extend([d] for d in range(dim + fold_rank, ref_ty.rank))
assert len(assoc) == new_ty.rank
return memref.collapse_shape(new_ty, ref, assoc)
def memref_unfold(ref: ir.Value, dim, factors) -> ir.Value:
"""Unfolds dim into two dimensions, the size of leading one given be major_factor."""
ref_ty = ir.MemRefType(ref.type)
new_shape = list(ref_ty.shape)
if sum(f is None for f in factors) > 1:
raise ValueError("Can only infer one dimension")
known_factor_prod = np.prod([f for f in factors if f is not None])
if new_shape[dim] % known_factor_prod:
raise ValueError("Non-divisible unfold:", new_shape[dim], factors)
factors = tuple(
new_shape[dim] // known_factor_prod if f is None else f for f in factors
)
new_shape[dim : dim + 1] = factors
identity = ir.AffineMapAttr.get(ir.AffineMap.get_identity(ref_ty.rank))
contig_strided_1d = ir.Attribute.parse("strided<[1]>")
if ref_ty.layout == identity or ref_ty.layout == contig_strided_1d:
new_layout = ir.AffineMapAttr.get(
ir.AffineMap.get_identity(ref_ty.rank + len(factors) - 1)
)
else:
new_strides, offset = ref_ty.get_strides_and_offset()
prev_stride = new_strides[dim]
inserted_strides = []
for f in reversed(factors):
inserted_strides.append(prev_stride)
prev_stride *= f
new_strides[dim : dim + 1] = reversed(inserted_strides)
new_layout = ir.StridedLayoutAttr.get(offset, new_strides)
new_ty = ir.MemRefType.get(
new_shape, ref_ty.element_type, new_layout, ref_ty.memory_space
)
if dim == ref_ty.rank:
assoc = [[d] for d in range(ref_ty.rank)]
assoc[-1].extend(range(ref_ty.rank, ref_ty.rank + len(factors) - 1))
else:
assoc = [[d] for d in range(dim)]
assoc.append(list(range(dim, dim + len(factors))))
assoc.extend([d + len(factors) - 1] for d in range(dim + 1, ref_ty.rank))
assert len(assoc) == ref_ty.rank
return memref.expand_shape(new_ty, ref, assoc, [], new_ty.shape)
def memref_unsqueeze(ref: ir.Value, dim) -> ir.Value:
"""Inserts a singleton dimension."""
ref_ty = ir.MemRefType(ref.type)
if dim == ref_ty.rank:
new_shape = list(ref_ty.shape)
new_shape.append(1)
identity = ir.AffineMapAttr.get(ir.AffineMap.get_identity(ref_ty.rank))
if ref_ty.layout == identity:
new_layout = ir.AffineMapAttr.get(
ir.AffineMap.get_identity(ref_ty.rank + 1)
)
else:
new_strides, offset = ref_ty.get_strides_and_offset()
new_strides.append(1)
new_layout = ir.StridedLayoutAttr.get(offset, new_strides)
new_ty = ir.MemRefType.get(
new_shape, ref_ty.element_type, new_layout, ref_ty.memory_space
)
assoc = [[d] for d in range(ref_ty.rank)]
assoc[-1].append(ref_ty.rank)
return memref.expand_shape(new_ty, ref, assoc, [], new_ty.shape)
else:
return memref_unfold(ref, dim, (1, None))
def is_memref_transposed(ref: ir.MemRefType) -> bool:
strides, _ = ref.get_strides_and_offset()
prev_stride = math.inf
for stride in strides:
if stride > prev_stride:
return True
prev_stride = stride
return False
def memref_transpose(ref: ir.Value, permutation: Sequence[int]) -> ir.Value:
ref_ty = ir.MemRefType(ref.type)
strides, offset = ref_ty.get_strides_and_offset()
new_strides = [strides[p] for p in permutation]
new_shape = [ref_ty.shape[p] for p in permutation]
new_layout = ir.StridedLayoutAttr.get(offset, new_strides)
new_ty = ir.MemRefType.get(
new_shape, ref_ty.element_type, new_layout, ref_ty.memory_space
)
return memref.transpose(
new_ty, ref, ir.AffineMap.get_permutation(permutation)
)
def parse_indices(
index, shape: tuple[int, ...], *, check_oob: bool = True
) -> tuple[list[ir.Value | int], list[int], list[bool]]:
if not isinstance(index, tuple):
index = (index,)
if trailing_dims := len(shape) - len(index):
index += (slice(None),) * trailing_dims
base_indices = []
slice_shape = []
is_squeezed = []
for axis, (idx, bound) in enumerate(zip(index, shape)):
if isinstance(idx, (ir.Operation, ir.OpView)):
idx = idx.result
if isinstance(idx, int):
if check_oob and (idx >= bound or (idx < 0 and -idx > bound)):
raise IndexError(
f"Index {idx} along axis {axis} is out of bounds for shape {shape}"
)
base_indices.append(idx if idx >= 0 else bound + idx)
slice_shape.append(1)
is_squeezed.append(True)
elif isinstance(idx, slice):
if idx.step is not None and idx.step != 1:
raise NotImplementedError("Strided slices not implemented")
start = idx.start or 0
if start < 0:
start = bound + start
stop = idx.stop or bound
if stop < 0:
stop = bound + stop
if check_oob and (
start < 0 or start >= bound or stop < 0 or stop > bound
):
raise IndexError(
f"Slice {idx} along axis {axis} is out of bounds for shape {shape}"
)
base_indices.append(start)
slice_shape.append(stop - start)
is_squeezed.append(False)
elif isinstance(idx, DynamicSlice):
if check_oob and (
isinstance(idx.base, int) and idx.base + idx.length > bound
):
raise IndexError(
f"Slice {idx} along axis {axis} is out of bounds for shape {shape}"
)
base_indices.append(idx.base)
slice_shape.append(idx.length)
is_squeezed.append(False)
elif isinstance(idx, ir.Value):
if not ir.IndexType.isinstance(idx.type):
raise ValueError("Expected an index-typed index")
base_indices.append(idx)
slice_shape.append(1)
is_squeezed.append(True)
else:
raise NotImplementedError(type(idx))
assert len(base_indices) == len(slice_shape) == len(is_squeezed) == len(shape)
return base_indices, slice_shape, is_squeezed
def commit_shared():
nvvm.fence_proxy(
nvvm.ProxyKind.async_shared, space=nvvm.SharedSpace.shared_cta
)
warpgroup_barrier()
def warpgroup_barrier():
# gpu.barrier() uses barrier number 0, and it would be unsafe to reuse it,
# so we shift the warpgroup index by 1.
i32 = ir.IntegerType.get_signless(32)
llvm.inline_asm(
ir.Type.parse("!llvm.void"),
[arith.addi(warpgroup_idx(sync=False), c(1, i32))],
f"bar.sync $0, {WARPGROUP_SIZE};",
"r",
has_side_effects=True,
)
def warp_barrier():
nvvm.bar_warp_sync(c(0xFFFFFFFF, ir.IntegerType.get_signless(32)))
@dataclasses.dataclass(frozen=True)
| DynamicSlice |
python | apache__airflow | airflow-core/src/airflow/exceptions.py | {
"start": 2864,
"end": 3017
} | class ____(AirflowException):
"""Raise by providers when imports are missing for optional provider features."""
| AirflowOptionalProviderFeatureException |
python | allegroai__clearml | clearml/backend_api/services/v2_20/models.py | {
"start": 65844,
"end": 84900
} | class ____(Request):
"""
Get all models
:param name: Get only models whose name matches this pattern (python regular
expression syntax)
:type name: str
:param user: List of user IDs used to filter results by the model's creating
user
:type user: Sequence[str]
:param ready: Indication whether to retrieve only models that are marked ready
If not supplied returns both ready and not-ready projects.
:type ready: bool
:param tags: User-defined tags list used to filter results. Prepend '-' to tag
name to indicate exclusion
:type tags: Sequence[str]
:param system_tags: System tags list used to filter results. Prepend '-' to
system tag name to indicate exclusion
:type system_tags: Sequence[str]
:param only_fields: List of model field names (if applicable, nesting is
supported using '.'). If provided, this list defines the query's projection
(only these fields will be returned for each result entry)
:type only_fields: Sequence[str]
:param page: Page number, returns a specific page out of the resulting list of
models
:type page: int
:param page_size: Page size, specifies the number of results returned in each
page (last page may contain fewer results)
:type page_size: int
:param project: List of associated project IDs
:type project: Sequence[str]
:param order_by: List of field names to order by. When search_text is used,
'@text_score' can be used as a field representing the text score of returned
documents. Use '-' prefix to specify descending order. Optional, recommended
when using page
:type order_by: Sequence[str]
:param task: List of associated task IDs
:type task: Sequence[str]
:param id: List of model IDs
:type id: Sequence[str]
:param search_text: Free text search query
:type search_text: str
:param framework: List of frameworks
:type framework: Sequence[str]
:param uri: List of model URIs
:type uri: Sequence[str]
:param _all_: Multi-field pattern condition (all fields match pattern)
:type _all_: MultiFieldPatternData
:param _any_: Multi-field pattern condition (any field matches pattern)
:type _any_: MultiFieldPatternData
:param scroll_id: Scroll ID returned from the previous calls to get_all
:type scroll_id: str
:param refresh_scroll: If set then all the data received with this scroll will
be required
:type refresh_scroll: bool
:param size: The number of models to retrieve
:type size: int
"""
_service = "models"
_action = "get_all"
_version = "2.20"
_schema = {
"definitions": {
"multi_field_pattern_data": {
"properties": {
"fields": {
"description": "List of field names",
"items": {"type": "string"},
"type": ["array", "null"],
},
"pattern": {
"description": "Pattern string (regex)",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"dependencies": {"page": ["page_size"]},
"properties": {
"_all_": {
"description": "Multi-field pattern condition (all fields match pattern)",
"oneOf": [
{"$ref": "#/definitions/multi_field_pattern_data"},
{"type": "null"},
],
},
"_any_": {
"description": "Multi-field pattern condition (any field matches pattern)",
"oneOf": [
{"$ref": "#/definitions/multi_field_pattern_data"},
{"type": "null"},
],
},
"framework": {
"description": "List of frameworks",
"items": {"type": "string"},
"type": ["array", "null"],
},
"id": {
"description": "List of model IDs",
"items": {"type": "string"},
"type": ["array", "null"],
},
"last_update": {
"description": "Model last update time",
"format": "date-time",
"type": ["string", "null"],
},
"name": {
"description": "Get only models whose name matches this pattern (python regular expression syntax)",
"type": ["string", "null"],
},
"only_fields": {
"description": "List of model field names (if applicable, nesting is supported using '.'). If provided, this list defines the query's projection (only these fields will be returned for each result entry)",
"items": {"type": "string"},
"type": ["array", "null"],
},
"order_by": {
"description": "List of field names to order by. When search_text is used, '@text_score' can be used as a field representing the text score of returned documents. Use '-' prefix to specify descending order. Optional, recommended when using page",
"items": {"type": "string"},
"type": ["array", "null"],
},
"page": {
"description": "Page number, returns a specific page out of the resulting list of models",
"minimum": 0,
"type": ["integer", "null"],
},
"page_size": {
"description": "Page size, specifies the number of results returned in each page (last page may contain fewer results)",
"minimum": 1,
"type": ["integer", "null"],
},
"project": {
"description": "List of associated project IDs",
"items": {"type": "string"},
"type": ["array", "null"],
},
"ready": {
"description": "Indication whether to retrieve only models that are marked ready If not supplied returns both ready and not-ready projects.",
"type": ["boolean", "null"],
},
"refresh_scroll": {
"description": "If set then all the data received with this scroll will be required",
"type": ["boolean", "null"],
},
"scroll_id": {
"description": "Scroll ID returned from the previous calls to get_all",
"type": ["string", "null"],
},
"search_text": {
"description": "Free text search query",
"type": ["string", "null"],
},
"size": {
"description": "The number of models to retrieve",
"minimum": 1,
"type": ["integer", "null"],
},
"system_tags": {
"description": "System tags list used to filter results. Prepend '-' to system tag name to indicate exclusion",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags list used to filter results. Prepend '-' to tag name to indicate exclusion",
"items": {"type": "string"},
"type": ["array", "null"],
},
"task": {
"description": "List of associated task IDs",
"items": {"type": "string"},
"type": ["array", "null"],
},
"uri": {
"description": "List of model URIs",
"items": {"type": "string"},
"type": ["array", "null"],
},
"user": {
"description": "List of user IDs used to filter results by the model's creating user",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
}
def __init__(
self,
name: Optional[str] = None,
user: Optional[List[str]] = None,
ready: Optional[bool] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
only_fields: Optional[List[str]] = None,
page: Optional[int] = None,
page_size: Optional[int] = None,
project: Optional[List[str]] = None,
order_by: Optional[List[str]] = None,
task: Optional[List[str]] = None,
id: Optional[List[str]] = None,
search_text: Optional[str] = None,
framework: Optional[List[str]] = None,
uri: Optional[List[str]] = None,
_all_: Any = None,
_any_: Any = None,
last_update: Optional[str] = None,
scroll_id: Optional[str] = None,
refresh_scroll: Optional[bool] = None,
size: Optional[int] = None,
**kwargs: Any
) -> None:
super(GetAllRequest, self).__init__(**kwargs)
self.name = name
self.user = user
self.ready = ready
self.tags = tags
self.system_tags = system_tags
self.only_fields = only_fields
self.page = page
self.page_size = page_size
self.project = project
self.order_by = order_by
self.task = task
self.id = id
self.search_text = search_text
self.framework = framework
self.uri = uri
self._all_ = _all_
self._any_ = _any_
self.last_update = last_update
self.scroll_id = scroll_id
self.refresh_scroll = refresh_scroll
self.size = size
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("user")
def user(self) -> Optional[List[str]]:
return self._property_user
@user.setter
def user(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_user = None
return
self.assert_isinstance(value, "user", (list, tuple))
self.assert_isinstance(value, "user", six.string_types, is_array=True)
self._property_user = value
@schema_property("ready")
def ready(self) -> Optional[bool]:
return self._property_ready
@ready.setter
def ready(self, value: Optional[bool]) -> None:
if value is None:
self._property_ready = None
return
self.assert_isinstance(value, "ready", (bool,))
self._property_ready = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("only_fields")
def only_fields(self) -> Optional[List[str]]:
return self._property_only_fields
@only_fields.setter
def only_fields(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_only_fields = None
return
self.assert_isinstance(value, "only_fields", (list, tuple))
self.assert_isinstance(value, "only_fields", six.string_types, is_array=True)
self._property_only_fields = value
@schema_property("page")
def page(self) -> Optional[int]:
return self._property_page
@page.setter
def page(self, value: Optional[int]) -> None:
if value is None:
self._property_page = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "page", six.integer_types)
self._property_page = value
@schema_property("page_size")
def page_size(self) -> Optional[int]:
return self._property_page_size
@page_size.setter
def page_size(self, value: Optional[int]) -> None:
if value is None:
self._property_page_size = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "page_size", six.integer_types)
self._property_page_size = value
@schema_property("project")
def project(self) -> Optional[List[str]]:
return self._property_project
@project.setter
def project(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", (list, tuple))
self.assert_isinstance(value, "project", six.string_types, is_array=True)
self._property_project = value
@schema_property("order_by")
def order_by(self) -> Optional[List[str]]:
return self._property_order_by
@order_by.setter
def order_by(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_order_by = None
return
self.assert_isinstance(value, "order_by", (list, tuple))
self.assert_isinstance(value, "order_by", six.string_types, is_array=True)
self._property_order_by = value
@schema_property("task")
def task(self) -> Optional[List[str]]:
return self._property_task
@task.setter
def task(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", (list, tuple))
self.assert_isinstance(value, "task", six.string_types, is_array=True)
self._property_task = value
@schema_property("id")
def id(self) -> Optional[List[str]]:
return self._property_id
@id.setter
def id(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", (list, tuple))
self.assert_isinstance(value, "id", six.string_types, is_array=True)
self._property_id = value
@schema_property("search_text")
def search_text(self) -> Optional[str]:
return self._property_search_text
@search_text.setter
def search_text(self, value: Optional[str]) -> None:
if value is None:
self._property_search_text = None
return
self.assert_isinstance(value, "search_text", six.string_types)
self._property_search_text = value
@schema_property("framework")
def framework(self) -> Optional[List[str]]:
return self._property_framework
@framework.setter
def framework(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_framework = None
return
self.assert_isinstance(value, "framework", (list, tuple))
self.assert_isinstance(value, "framework", six.string_types, is_array=True)
self._property_framework = value
@schema_property("uri")
def uri(self) -> Optional[List[str]]:
return self._property_uri
@uri.setter
def uri(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_uri = None
return
self.assert_isinstance(value, "uri", (list, tuple))
self.assert_isinstance(value, "uri", six.string_types, is_array=True)
self._property_uri = value
@schema_property("_all_")
def _all_(self) -> Any:
return self._property__all_
@_all_.setter
def _all_(self, value: Any) -> None:
if value is None:
self._property__all_ = None
return
if isinstance(value, dict):
value = MultiFieldPatternData.from_dict(value)
else:
self.assert_isinstance(value, "_all_", MultiFieldPatternData)
self._property__all_ = value
@schema_property("_any_")
def _any_(self) -> Any:
return self._property__any_
@_any_.setter
def _any_(self, value: Any) -> None:
if value is None:
self._property__any_ = None
return
if isinstance(value, dict):
value = MultiFieldPatternData.from_dict(value)
else:
self.assert_isinstance(value, "_any_", MultiFieldPatternData)
self._property__any_ = value
@schema_property("last_update")
def last_update(self) -> Optional[str]:
return self._property_last_update
@last_update.setter
def last_update(self, value: Optional[str]) -> None:
if value is None:
self._property_last_update = None
return
self.assert_isinstance(value, "last_update", six.string_types)
self._property_last_update = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
@schema_property("refresh_scroll")
def refresh_scroll(self) -> Optional[bool]:
return self._property_refresh_scroll
@refresh_scroll.setter
def refresh_scroll(self, value: Optional[bool]) -> None:
if value is None:
self._property_refresh_scroll = None
return
self.assert_isinstance(value, "refresh_scroll", (bool,))
self._property_refresh_scroll = value
@schema_property("size")
def size(self) -> Optional[int]:
return self._property_size
@size.setter
def size(self, value: Optional[int]) -> None:
if value is None:
self._property_size = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "size", six.integer_types)
self._property_size = value
| GetAllRequest |
python | pytest-dev__pytest | testing/test_config.py | {
"start": 46722,
"end": 63080
} | class ____:
def test_basic_behavior(self, _sys_snapshot) -> None:
option_dict = {"verbose": 444, "foo": "bar", "capture": "no"}
args = ["a", "b"]
config = Config.fromdictargs(option_dict, args)
with pytest.raises(AssertionError):
config.parse(["should refuse to parse again"])
assert config.option.verbose == 444
assert config.option.foo == "bar"
assert config.option.capture == "no"
assert config.args == args
def test_invocation_params_args(self, _sys_snapshot) -> None:
"""Show that fromdictargs can handle args in their "orig" format"""
option_dict: dict[str, object] = {}
args = ["-vvvv", "-s", "a", "b"]
config = Config.fromdictargs(option_dict, args)
assert config.args == ["a", "b"]
assert config.invocation_params.args == tuple(args)
assert config.option.verbose == 4
assert config.option.capture == "no"
def test_inifilename(self, tmp_path: Path) -> None:
d1 = tmp_path.joinpath("foo")
d1.mkdir()
p1 = d1.joinpath("bar.ini")
p1.touch()
p1.write_text(
textwrap.dedent(
"""\
[pytest]
name = value
"""
),
encoding="utf-8",
)
inifilename = "../../foo/bar.ini"
option_dict = {"inifilename": inifilename, "capture": "no"}
cwd = tmp_path.joinpath("a/b")
cwd.mkdir(parents=True)
p2 = cwd.joinpath("pytest.ini")
p2.touch()
p2.write_text(
textwrap.dedent(
"""\
[pytest]
name = wrong-value
should_not_be_set = true
"""
),
encoding="utf-8",
)
with MonkeyPatch.context() as mp:
mp.chdir(cwd)
config = Config.fromdictargs(option_dict, [])
inipath = absolutepath(inifilename)
assert config.args == [str(cwd)]
assert config.option.inifilename == inifilename
assert config.option.capture == "no"
# this indicates this is the file used for getting configuration values
assert config.inipath == inipath
assert config._inicfg.get("name") == ConfigValue(
"value", origin="file", mode="ini"
)
assert config._inicfg.get("should_not_be_set") is None
def test_options_on_small_file_do_not_blow_up(pytester: Pytester) -> None:
def runfiletest(opts: Sequence[str]) -> None:
reprec = pytester.inline_run(*opts)
passed, skipped, failed = reprec.countoutcomes()
assert failed == 2
assert skipped == passed == 0
path = str(
pytester.makepyfile(
"""
def test_f1(): assert 0
def test_f2(): assert 0
"""
)
)
runfiletest([path])
runfiletest(["-l", path])
runfiletest(["-s", path])
runfiletest(["--tb=no", path])
runfiletest(["--tb=short", path])
runfiletest(["--tb=long", path])
runfiletest(["--fulltrace", path])
runfiletest(["--traceconfig", path])
runfiletest(["-v", path])
runfiletest(["-v", "-v", path])
def test_preparse_ordering_with_setuptools(
pytester: Pytester, monkeypatch: MonkeyPatch
) -> None:
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
class EntryPoint:
name = "mytestplugin"
group = "pytest11"
def load(self):
class PseudoPlugin:
x = 42
return PseudoPlugin()
class Dist:
files = ()
metadata = {"name": "foo"}
entry_points = (EntryPoint(),)
def my_dists():
return (Dist,)
monkeypatch.setattr(importlib.metadata, "distributions", my_dists)
pytester.makeconftest(
"""
pytest_plugins = "mytestplugin",
"""
)
monkeypatch.setenv("PYTEST_PLUGINS", "mytestplugin")
config = pytester.parseconfig()
plugin = config.pluginmanager.getplugin("mytestplugin")
assert plugin.x == 42
def test_setuptools_importerror_issue1479(
pytester: Pytester, monkeypatch: MonkeyPatch
) -> None:
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
class DummyEntryPoint:
name = "mytestplugin"
group = "pytest11"
def load(self):
raise ImportError("Don't hide me!")
class Distribution:
version = "1.0"
files = ("foo.txt",)
metadata = {"name": "foo"}
entry_points = (DummyEntryPoint(),)
def distributions():
return (Distribution(),)
monkeypatch.setattr(importlib.metadata, "distributions", distributions)
with pytest.raises(ImportError):
pytester.parseconfig()
def test_importlib_metadata_broken_distribution(
pytester: Pytester, monkeypatch: MonkeyPatch
) -> None:
"""Integration test for broken distributions with 'files' metadata being None (#5389)"""
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
class DummyEntryPoint:
name = "mytestplugin"
group = "pytest11"
def load(self):
return object()
class Distribution:
version = "1.0"
files = None
metadata = {"name": "foo"}
entry_points = (DummyEntryPoint(),)
def distributions():
return (Distribution(),)
monkeypatch.setattr(importlib.metadata, "distributions", distributions)
pytester.parseconfig()
@pytest.mark.parametrize("block_it", [True, False])
def test_plugin_preparse_prevents_setuptools_loading(
pytester: Pytester, monkeypatch: MonkeyPatch, block_it: bool
) -> None:
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", raising=False)
plugin_module_placeholder = object()
class DummyEntryPoint:
name = "mytestplugin"
group = "pytest11"
def load(self):
return plugin_module_placeholder
class Distribution:
version = "1.0"
files = ("foo.txt",)
metadata = {"name": "foo"}
entry_points = (DummyEntryPoint(),)
def distributions():
return (Distribution(),)
monkeypatch.setattr(importlib.metadata, "distributions", distributions)
args = ("-p", "no:mytestplugin") if block_it else ()
config = pytester.parseconfig(*args)
config.pluginmanager.import_plugin("mytestplugin")
if block_it:
assert "mytestplugin" not in sys.modules
assert config.pluginmanager.get_plugin("mytestplugin") is None
else:
assert (
config.pluginmanager.get_plugin("mytestplugin") is plugin_module_placeholder
)
@pytest.mark.parametrize("disable_plugin_method", ["env_var", "flag", ""])
@pytest.mark.parametrize("enable_plugin_method", ["env_var", "flag", ""])
def test_disable_plugin_autoload(
pytester: Pytester,
monkeypatch: MonkeyPatch,
enable_plugin_method: str,
disable_plugin_method: str,
) -> None:
class DummyEntryPoint:
project_name = name = "mytestplugin"
group = "pytest11"
version = "1.0"
def load(self):
return sys.modules[self.name]
class Distribution:
metadata = {"name": "foo"}
entry_points = (DummyEntryPoint(),)
files = ()
class PseudoPlugin:
x = 42
attrs_used = []
def __getattr__(self, name):
assert name in ("__loader__", "__spec__")
self.attrs_used.append(name)
return object()
def distributions():
return (Distribution(),)
parse_args: list[str] = []
if disable_plugin_method == "env_var":
monkeypatch.setenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "1")
elif disable_plugin_method == "flag":
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
parse_args.append("--disable-plugin-autoload")
else:
assert disable_plugin_method == ""
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
if enable_plugin_method == "env_var":
monkeypatch.setenv("PYTEST_PLUGINS", "mytestplugin")
elif enable_plugin_method == "flag":
parse_args.extend(["-p", "mytestplugin"])
else:
assert enable_plugin_method == ""
monkeypatch.setattr(importlib.metadata, "distributions", distributions)
monkeypatch.setitem(sys.modules, "mytestplugin", PseudoPlugin())
config = pytester.parseconfig(*parse_args)
has_loaded = config.pluginmanager.get_plugin("mytestplugin") is not None
# it should load if it's enabled, or we haven't disabled autoloading
assert has_loaded == (bool(enable_plugin_method) or not disable_plugin_method)
# The reason for the discrepancy between 'has_loaded' and __loader__ being accessed
# appears to be the monkeypatching of importlib.metadata.distributions; where
# files being empty means that _mark_plugins_for_rewrite doesn't find the plugin.
# But enable_method==flag ends up in mark_rewrite being called and __loader__
# being accessed.
assert ("__loader__" in PseudoPlugin.attrs_used) == (
has_loaded
and not (enable_plugin_method in ("env_var", "") and not disable_plugin_method)
)
# __spec__ is accessed in AssertionRewritingHook.exec_module, which would be
# eventually called if we did a full pytest run; but it's only accessed with
# enable_plugin_method=="env_var" because that will early-load it.
# Except when autoloads aren't disabled, in which case PytestPluginManager.import_plugin
# bails out before importing it.. because it knows it'll be loaded later?
# The above seems a bit weird, but I *think* it's true.
if platform.python_implementation() != "PyPy":
assert ("__spec__" in PseudoPlugin.attrs_used) == bool(
enable_plugin_method == "env_var" and disable_plugin_method
)
# __spec__ is present when testing locally on pypy, but not in CI ????
def test_plugin_loading_order(pytester: Pytester) -> None:
"""Test order of plugin loading with `-p`."""
p1 = pytester.makepyfile(
"""
def test_terminal_plugin(request):
import myplugin
assert myplugin.terminal_plugin == [False, True]
""",
myplugin="""
terminal_plugin = []
def pytest_configure(config):
terminal_plugin.append(bool(config.pluginmanager.get_plugin("terminalreporter")))
def pytest_sessionstart(session):
config = session.config
terminal_plugin.append(bool(config.pluginmanager.get_plugin("terminalreporter")))
""",
)
pytester.syspathinsert()
result = pytester.runpytest("-p", "myplugin", str(p1))
assert result.ret == 0
def test_invalid_options_show_extra_information(pytester: Pytester) -> None:
"""Display extra information when pytest exits due to unrecognized
options in the command-line."""
pytester.makeini(
"""
[pytest]
addopts = --invalid-option
"""
)
result = pytester.runpytest()
result.stderr.fnmatch_lines(
[
"*error: unrecognized arguments: --invalid-option*",
"* inifile: {}*".format(pytester.path.joinpath("tox.ini")),
f"* rootdir: {pytester.path}*",
]
)
@pytest.mark.parametrize(
"args",
[
["dir1", "dir2", "-v"],
["dir1", "-v", "dir2"],
["dir2", "-v", "dir1"],
["-v", "dir2", "dir1"],
],
)
def test_consider_args_after_options_for_rootdir(
pytester: Pytester, args: list[str]
) -> None:
"""
Consider all arguments in the command-line for rootdir
discovery, even if they happen to occur after an option. #949
"""
# replace "dir1" and "dir2" from "args" into their real directory
root = pytester.mkdir("myroot")
d1 = root.joinpath("dir1")
d1.mkdir()
d2 = root.joinpath("dir2")
d2.mkdir()
for i, arg in enumerate(args):
if arg == "dir1":
args[i] = str(d1)
elif arg == "dir2":
args[i] = str(d2)
with MonkeyPatch.context() as mp:
mp.chdir(root)
result = pytester.runpytest(*args)
result.stdout.fnmatch_lines(["*rootdir: *myroot"])
def test_toolongargs_issue224(pytester: Pytester) -> None:
result = pytester.runpytest("-m", "hello" * 500)
assert result.ret == ExitCode.NO_TESTS_COLLECTED
def test_config_in_subdirectory_colon_command_line_issue2148(
pytester: Pytester,
) -> None:
conftest_source = """
def pytest_addoption(parser):
parser.addini('foo', 'foo')
"""
pytester.makefile(
".ini",
**{"pytest": "[pytest]\nfoo = root", "subdir/pytest": "[pytest]\nfoo = subdir"},
)
pytester.makepyfile(
**{
"conftest": conftest_source,
"subdir/conftest": conftest_source,
"subdir/test_foo": """\
def test_foo(pytestconfig):
assert pytestconfig.getini('foo') == 'subdir'
""",
}
)
result = pytester.runpytest("subdir/test_foo.py::test_foo")
assert result.ret == 0
def test_notify_exception(pytester: Pytester, capfd) -> None:
config = pytester.parseconfig()
with pytest.raises(ValueError) as excinfo:
raise ValueError(1)
config.notify_exception(excinfo, config.option)
_, err = capfd.readouterr()
assert "ValueError" in err
class A:
def pytest_internalerror(self):
return True
config.pluginmanager.register(A())
config.notify_exception(excinfo, config.option)
_, err = capfd.readouterr()
assert not err
config = pytester.parseconfig("-p", "no:terminal")
with pytest.raises(ValueError) as excinfo:
raise ValueError(1)
config.notify_exception(excinfo, config.option)
_, err = capfd.readouterr()
assert "ValueError" in err
def test_no_terminal_discovery_error(pytester: Pytester) -> None:
pytester.makepyfile("raise TypeError('oops!')")
result = pytester.runpytest("-p", "no:terminal", "--collect-only")
assert result.ret == ExitCode.INTERRUPTED
def test_load_initial_conftest_last_ordering(_config_for_test):
pm = _config_for_test.pluginmanager
class My:
def pytest_load_initial_conftests(self):
pass
m = My()
pm.register(m)
hc = pm.hook.pytest_load_initial_conftests
hookimpls = [
(
hookimpl.function.__module__,
"wrapper" if (hookimpl.wrapper or hookimpl.hookwrapper) else "nonwrapper",
)
for hookimpl in hc.get_hookimpls()
]
assert hookimpls == [
("_pytest.config", "nonwrapper"),
(m.__module__, "nonwrapper"),
("_pytest.legacypath", "nonwrapper"),
("_pytest.capture", "wrapper"),
("_pytest.warnings", "wrapper"),
]
def test_get_plugin_specs_as_list() -> None:
def exp_match(val: object) -> str:
return (
f"Plugins may be specified as a sequence or a ','-separated string "
f"of plugin names. Got: {re.escape(repr(val))}"
)
with pytest.raises(pytest.UsageError, match=exp_match({"foo"})):
_get_plugin_specs_as_list({"foo"}) # type: ignore[arg-type]
with pytest.raises(pytest.UsageError, match=exp_match({})):
_get_plugin_specs_as_list(dict()) # type: ignore[arg-type]
assert _get_plugin_specs_as_list(None) == []
assert _get_plugin_specs_as_list("") == []
assert _get_plugin_specs_as_list("foo") == ["foo"]
assert _get_plugin_specs_as_list("foo,bar") == ["foo", "bar"]
assert _get_plugin_specs_as_list(["foo", "bar"]) == ["foo", "bar"]
assert _get_plugin_specs_as_list(("foo", "bar")) == ["foo", "bar"]
def test_collect_pytest_prefix_bug_integration(pytester: Pytester) -> None:
"""Integration test for issue #3775"""
p = pytester.copy_example("config/collect_pytest_prefix")
result = pytester.runpytest(p)
result.stdout.fnmatch_lines(["* 1 passed *"])
def test_collect_pytest_prefix_bug(pytestconfig):
"""Ensure we collect only actual functions from conftest files (#3775)"""
class Dummy:
class pytest_something:
pass
pm = pytestconfig.pluginmanager
assert pm.parse_hookimpl_opts(Dummy(), "pytest_something") is None
| TestConfigFromdictargs |
python | PyCQA__pylint | tests/functional/s/string/string_formatting.py | {
"start": 245,
"end": 341
} | class ____:
""" Has a __getattr__ """
def __getattr__(self, _):
return self
| Custom |
python | facebook__pyre-check | client/commands/infer.py | {
"start": 15671,
"end": 15769
} | class ____(FieldAnnotation):
parent: str
@dataclasses.dataclass(frozen=True)
| AttributeAnnotation |
python | tiangolo__fastapi | tests/test_compat_params_v1.py | {
"start": 509,
"end": 42328
} | class ____(BaseModel):
name: str
price: float
description: Optional[str] = None
app = FastAPI()
@app.get("/items/{item_id}")
def get_item_with_path(
item_id: Annotated[int, Path(title="The ID of the item", ge=1, le=1000)],
):
return {"item_id": item_id}
@app.get("/items/")
def get_items_with_query(
q: Annotated[
Optional[str], Query(min_length=3, max_length=50, pattern="^[a-zA-Z0-9 ]+$")
] = None,
skip: Annotated[int, Query(ge=0)] = 0,
limit: Annotated[int, Query(ge=1, le=100, examples=[5])] = 10,
):
return {"q": q, "skip": skip, "limit": limit}
@app.get("/users/")
def get_user_with_header(
x_custom: Annotated[Optional[str], Header()] = None,
x_token: Annotated[Optional[str], Header(convert_underscores=True)] = None,
):
return {"x_custom": x_custom, "x_token": x_token}
@app.get("/cookies/")
def get_cookies(
session_id: Annotated[Optional[str], Cookie()] = None,
tracking_id: Annotated[Optional[str], Cookie(min_length=10)] = None,
):
return {"session_id": session_id, "tracking_id": tracking_id}
@app.post("/items/")
def create_item(
item: Annotated[
Item,
Body(examples=[{"name": "Foo", "price": 35.4, "description": "The Foo item"}]),
],
):
return {"item": item}
@app.post("/items-embed/")
def create_item_embed(
item: Annotated[Item, Body(embed=True)],
):
return {"item": item}
@app.put("/items/{item_id}")
def update_item(
item_id: Annotated[int, Path(ge=1)],
item: Annotated[Item, Body()],
importance: Annotated[int, Body(gt=0, le=10)],
):
return {"item": item, "importance": importance}
@app.post("/form-data/")
def submit_form(
username: Annotated[str, Form(min_length=3, max_length=50)],
password: Annotated[str, Form(min_length=8)],
email: Annotated[Optional[str], Form()] = None,
):
return {"username": username, "password": password, "email": email}
@app.post("/upload/")
def upload_file(
file: Annotated[bytes, File()],
description: Annotated[Optional[str], Form()] = None,
):
return {"file_size": len(file), "description": description}
@app.post("/upload-multiple/")
def upload_multiple_files(
files: Annotated[List[bytes], File()],
note: Annotated[str, Form()] = "",
):
return {
"file_count": len(files),
"total_size": sum(len(f) for f in files),
"note": note,
}
client = TestClient(app)
# Path parameter tests
def test_path_param_valid():
response = client.get("/items/50")
assert response.status_code == 200
assert response.json() == {"item_id": 50}
def test_path_param_too_large():
response = client.get("/items/1001")
assert response.status_code == 422
error = response.json()["detail"][0]
assert error["loc"] == ["path", "item_id"]
def test_path_param_too_small():
response = client.get("/items/0")
assert response.status_code == 422
error = response.json()["detail"][0]
assert error["loc"] == ["path", "item_id"]
# Query parameter tests
def test_query_params_valid():
response = client.get("/items/?q=test search&skip=5&limit=20")
assert response.status_code == 200
assert response.json() == {"q": "test search", "skip": 5, "limit": 20}
def test_query_params_defaults():
response = client.get("/items/")
assert response.status_code == 200
assert response.json() == {"q": None, "skip": 0, "limit": 10}
def test_query_param_too_short():
response = client.get("/items/?q=ab")
assert response.status_code == 422
error = response.json()["detail"][0]
assert error["loc"] == ["query", "q"]
def test_query_param_invalid_pattern():
response = client.get("/items/?q=test@#$")
assert response.status_code == 422
error = response.json()["detail"][0]
assert error["loc"] == ["query", "q"]
def test_query_param_limit_too_large():
response = client.get("/items/?limit=101")
assert response.status_code == 422
error = response.json()["detail"][0]
assert error["loc"] == ["query", "limit"]
# Header parameter tests
def test_header_params():
response = client.get(
"/users/",
headers={"X-Custom": "Plumbus", "X-Token": "secret-token"},
)
assert response.status_code == 200
assert response.json() == {
"x_custom": "Plumbus",
"x_token": "secret-token",
}
def test_header_underscore_conversion():
response = client.get(
"/users/",
headers={"x-token": "secret-token-with-dash"},
)
assert response.status_code == 200
assert response.json()["x_token"] == "secret-token-with-dash"
def test_header_params_none():
response = client.get("/users/")
assert response.status_code == 200
assert response.json() == {"x_custom": None, "x_token": None}
# Cookie parameter tests
def test_cookie_params():
with TestClient(app) as client:
client.cookies.set("session_id", "abc123")
client.cookies.set("tracking_id", "1234567890abcdef")
response = client.get("/cookies/")
assert response.status_code == 200
assert response.json() == {
"session_id": "abc123",
"tracking_id": "1234567890abcdef",
}
def test_cookie_tracking_id_too_short():
with TestClient(app) as client:
client.cookies.set("tracking_id", "short")
response = client.get("/cookies/")
assert response.status_code == 422
assert response.json() == snapshot(
{
"detail": [
{
"loc": ["cookie", "tracking_id"],
"msg": "ensure this value has at least 10 characters",
"type": "value_error.any_str.min_length",
"ctx": {"limit_value": 10},
}
]
}
)
def test_cookie_params_none():
response = client.get("/cookies/")
assert response.status_code == 200
assert response.json() == {"session_id": None, "tracking_id": None}
# Body parameter tests
def test_body_param():
response = client.post(
"/items/",
json={"name": "Test Item", "price": 29.99, "description": "A test item"},
)
assert response.status_code == 200
assert response.json() == {
"item": {
"name": "Test Item",
"price": 29.99,
"description": "A test item",
}
}
def test_body_param_minimal():
response = client.post(
"/items/",
json={"name": "Minimal", "price": 9.99},
)
assert response.status_code == 200
assert response.json() == {
"item": {"name": "Minimal", "price": 9.99, "description": None}
}
def test_body_param_missing_required():
response = client.post(
"/items/",
json={"name": "Incomplete"},
)
assert response.status_code == 422
error = response.json()["detail"][0]
assert error["loc"] == ["body", "price"]
def test_body_embed():
response = client.post(
"/items-embed/",
json={"item": {"name": "Embedded", "price": 15.0}},
)
assert response.status_code == 200
assert response.json() == {
"item": {"name": "Embedded", "price": 15.0, "description": None}
}
def test_body_embed_wrong_structure():
response = client.post(
"/items-embed/",
json={"name": "Not Embedded", "price": 15.0},
)
assert response.status_code == 422
# Multiple body parameters test
def test_multiple_body_params():
response = client.put(
"/items/5",
json={
"item": {"name": "Updated Item", "price": 49.99},
"importance": 8,
},
)
assert response.status_code == 200
assert response.json() == snapshot(
{
"item": {"name": "Updated Item", "price": 49.99, "description": None},
"importance": 8,
}
)
def test_multiple_body_params_importance_too_large():
response = client.put(
"/items/5",
json={
"item": {"name": "Item", "price": 10.0},
"importance": 11,
},
)
assert response.status_code == 422
assert response.json() == snapshot(
{
"detail": [
{
"loc": ["body", "importance"],
"msg": "ensure this value is less than or equal to 10",
"type": "value_error.number.not_le",
"ctx": {"limit_value": 10},
}
]
}
)
def test_multiple_body_params_importance_too_small():
response = client.put(
"/items/5",
json={
"item": {"name": "Item", "price": 10.0},
"importance": 0,
},
)
assert response.status_code == 422
assert response.json() == snapshot(
{
"detail": [
{
"loc": ["body", "importance"],
"msg": "ensure this value is greater than 0",
"type": "value_error.number.not_gt",
"ctx": {"limit_value": 0},
}
]
}
)
# Form parameter tests
def test_form_data_valid():
response = client.post(
"/form-data/",
data={
"username": "testuser",
"password": "password123",
"email": "test@example.com",
},
)
assert response.status_code == 200, response.text
assert response.json() == {
"username": "testuser",
"password": "password123",
"email": "test@example.com",
}
def test_form_data_optional_field():
response = client.post(
"/form-data/",
data={"username": "testuser", "password": "password123"},
)
assert response.status_code == 200
assert response.json() == {
"username": "testuser",
"password": "password123",
"email": None,
}
def test_form_data_username_too_short():
response = client.post(
"/form-data/",
data={"username": "ab", "password": "password123"},
)
assert response.status_code == 422
assert response.json() == snapshot(
{
"detail": [
{
"loc": ["body", "username"],
"msg": "ensure this value has at least 3 characters",
"type": "value_error.any_str.min_length",
"ctx": {"limit_value": 3},
}
]
}
)
def test_form_data_password_too_short():
response = client.post(
"/form-data/",
data={"username": "testuser", "password": "short"},
)
assert response.status_code == 422
assert response.json() == snapshot(
{
"detail": [
{
"loc": ["body", "password"],
"msg": "ensure this value has at least 8 characters",
"type": "value_error.any_str.min_length",
"ctx": {"limit_value": 8},
}
]
}
)
# File upload tests
def test_upload_file():
response = client.post(
"/upload/",
files={"file": ("test.txt", b"Hello, World!", "text/plain")},
data={"description": "A test file"},
)
assert response.status_code == 200
assert response.json() == {
"file_size": 13,
"description": "A test file",
}
def test_upload_file_without_description():
response = client.post(
"/upload/",
files={"file": ("test.txt", b"Hello!", "text/plain")},
)
assert response.status_code == 200
assert response.json() == {
"file_size": 6,
"description": None,
}
def test_upload_multiple_files():
response = client.post(
"/upload-multiple/",
files=[
("files", ("file1.txt", b"Content 1", "text/plain")),
("files", ("file2.txt", b"Content 2", "text/plain")),
("files", ("file3.txt", b"Content 3", "text/plain")),
],
data={"note": "Multiple files uploaded"},
)
assert response.status_code == 200
assert response.json() == {
"file_count": 3,
"total_size": 27,
"note": "Multiple files uploaded",
}
def test_upload_multiple_files_empty_note():
response = client.post(
"/upload-multiple/",
files=[
("files", ("file1.txt", b"Test", "text/plain")),
],
)
assert response.status_code == 200
assert response.json()["file_count"] == 1
assert response.json()["note"] == ""
# __repr__ tests
def test_query_repr():
query_param = Query(default=None, min_length=3)
assert repr(query_param) == "Query(None)"
def test_body_repr():
body_param = Body(default=None)
assert repr(body_param) == "Body(None)"
# Deprecation warning tests for regex parameter
def test_query_regex_deprecation_warning():
with pytest.warns(DeprecationWarning, match="`regex` has been deprecated"):
Query(regex="^test$")
def test_body_regex_deprecation_warning():
with pytest.warns(DeprecationWarning, match="`regex` has been deprecated"):
Body(regex="^test$")
# Deprecation warning tests for example parameter
def test_query_example_deprecation_warning():
with pytest.warns(DeprecationWarning, match="`example` has been deprecated"):
Query(example="test example")
def test_body_example_deprecation_warning():
with pytest.warns(DeprecationWarning, match="`example` has been deprecated"):
Body(example={"test": "example"})
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == snapshot(
{
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/{item_id}": {
"get": {
"summary": "Get Item With Path",
"operationId": "get_item_with_path_items__item_id__get",
"parameters": [
{
"name": "item_id",
"in": "path",
"required": True,
"schema": {
"title": "The ID of the item",
"minimum": 1,
"maximum": 1000,
"type": "integer",
},
}
],
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
},
"put": {
"summary": "Update Item",
"operationId": "update_item_items__item_id__put",
"parameters": [
{
"name": "item_id",
"in": "path",
"required": True,
"schema": {
"title": "Item Id",
"minimum": 1,
"type": "integer",
},
}
],
"requestBody": {
"required": True,
"content": {
"application/json": {
"schema": pydantic_snapshot(
v1=snapshot(
{
"$ref": "#/components/schemas/Body_update_item_items__item_id__put"
}
),
v2=snapshot(
{
"title": "Body",
"allOf": [
{
"$ref": "#/components/schemas/Body_update_item_items__item_id__put"
}
],
}
),
),
}
},
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
},
},
"/items/": {
"get": {
"summary": "Get Items With Query",
"operationId": "get_items_with_query_items__get",
"parameters": [
{
"name": "q",
"in": "query",
"required": False,
"schema": {
"title": "Q",
"maxLength": 50,
"minLength": 3,
"pattern": "^[a-zA-Z0-9 ]+$",
"type": "string",
},
},
{
"name": "skip",
"in": "query",
"required": False,
"schema": {
"title": "Skip",
"default": 0,
"minimum": 0,
"type": "integer",
},
},
{
"name": "limit",
"in": "query",
"required": False,
"schema": {
"title": "Limit",
"default": 10,
"minimum": 1,
"maximum": 100,
"examples": [5],
"type": "integer",
},
},
],
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
},
"post": {
"summary": "Create Item",
"operationId": "create_item_items__post",
"requestBody": {
"required": True,
"content": {
"application/json": {
"schema": {
"title": "Item",
"examples": [
{
"name": "Foo",
"price": 35.4,
"description": "The Foo item",
}
],
"allOf": [
{"$ref": "#/components/schemas/Item"}
],
}
}
},
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
},
},
"/users/": {
"get": {
"summary": "Get User With Header",
"operationId": "get_user_with_header_users__get",
"parameters": [
{
"name": "x-custom",
"in": "header",
"required": False,
"schema": {"title": "X-Custom", "type": "string"},
},
{
"name": "x-token",
"in": "header",
"required": False,
"schema": {"title": "X-Token", "type": "string"},
},
],
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/cookies/": {
"get": {
"summary": "Get Cookies",
"operationId": "get_cookies_cookies__get",
"parameters": [
{
"name": "session_id",
"in": "cookie",
"required": False,
"schema": {"title": "Session Id", "type": "string"},
},
{
"name": "tracking_id",
"in": "cookie",
"required": False,
"schema": {
"title": "Tracking Id",
"minLength": 10,
"type": "string",
},
},
],
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/items-embed/": {
"post": {
"summary": "Create Item Embed",
"operationId": "create_item_embed_items_embed__post",
"requestBody": {
"content": {
"application/json": {
"schema": pydantic_snapshot(
v1=snapshot(
{
"$ref": "#/components/schemas/Body_create_item_embed_items_embed__post"
}
),
v2=snapshot(
{
"allOf": [
{
"$ref": "#/components/schemas/Body_create_item_embed_items_embed__post"
}
],
"title": "Body",
}
),
),
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/form-data/": {
"post": {
"summary": "Submit Form",
"operationId": "submit_form_form_data__post",
"requestBody": {
"content": {
"application/x-www-form-urlencoded": {
"schema": pydantic_snapshot(
v1=snapshot(
{
"$ref": "#/components/schemas/Body_submit_form_form_data__post"
}
),
v2=snapshot(
{
"allOf": [
{
"$ref": "#/components/schemas/Body_submit_form_form_data__post"
}
],
"title": "Body",
}
),
),
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/upload/": {
"post": {
"summary": "Upload File",
"operationId": "upload_file_upload__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": pydantic_snapshot(
v1=snapshot(
{
"$ref": "#/components/schemas/Body_upload_file_upload__post"
}
),
v2=snapshot(
{
"allOf": [
{
"$ref": "#/components/schemas/Body_upload_file_upload__post"
}
],
"title": "Body",
}
),
),
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/upload-multiple/": {
"post": {
"summary": "Upload Multiple Files",
"operationId": "upload_multiple_files_upload_multiple__post",
"requestBody": {
"content": {
"multipart/form-data": {
"schema": pydantic_snapshot(
v1=snapshot(
{
"$ref": "#/components/schemas/Body_upload_multiple_files_upload_multiple__post"
}
),
v2=snapshot(
{
"allOf": [
{
"$ref": "#/components/schemas/Body_upload_multiple_files_upload_multiple__post"
}
],
"title": "Body",
}
),
),
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"Body_create_item_embed_items_embed__post": {
"properties": pydantic_snapshot(
v1=snapshot(
{"item": {"$ref": "#/components/schemas/Item"}}
),
v2=snapshot(
{
"item": {
"allOf": [
{"$ref": "#/components/schemas/Item"}
],
"title": "Item",
}
}
),
),
"type": "object",
"required": ["item"],
"title": "Body_create_item_embed_items_embed__post",
},
"Body_submit_form_form_data__post": {
"properties": {
"username": {
"type": "string",
"maxLength": 50,
"minLength": 3,
"title": "Username",
},
"password": {
"type": "string",
"minLength": 8,
"title": "Password",
},
"email": {"type": "string", "title": "Email"},
},
"type": "object",
"required": ["username", "password"],
"title": "Body_submit_form_form_data__post",
},
"Body_update_item_items__item_id__put": {
"properties": {
"item": pydantic_snapshot(
v1=snapshot({"$ref": "#/components/schemas/Item"}),
v2=snapshot(
{
"allOf": [
{"$ref": "#/components/schemas/Item"}
],
"title": "Item",
}
),
),
"importance": {
"type": "integer",
"maximum": 10.0,
"exclusiveMinimum": 0.0,
"title": "Importance",
},
},
"type": "object",
"required": ["item", "importance"],
"title": "Body_update_item_items__item_id__put",
},
"Body_upload_file_upload__post": {
"properties": {
"file": {
"type": "string",
"format": "binary",
"title": "File",
},
"description": {"type": "string", "title": "Description"},
},
"type": "object",
"required": ["file"],
"title": "Body_upload_file_upload__post",
},
"Body_upload_multiple_files_upload_multiple__post": {
"properties": {
"files": {
"items": {"type": "string", "format": "binary"},
"type": "array",
"title": "Files",
},
"note": {"type": "string", "title": "Note", "default": ""},
},
"type": "object",
"required": ["files"],
"title": "Body_upload_multiple_files_upload_multiple__post",
},
"HTTPValidationError": {
"properties": {
"detail": {
"items": {
"$ref": "#/components/schemas/ValidationError"
},
"type": "array",
"title": "Detail",
}
},
"type": "object",
"title": "HTTPValidationError",
},
"Item": {
"properties": {
"name": {"type": "string", "title": "Name"},
"price": {"type": "number", "title": "Price"},
"description": {"type": "string", "title": "Description"},
},
"type": "object",
"required": ["name", "price"],
"title": "Item",
},
"ValidationError": {
"properties": {
"loc": {
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
"type": "array",
"title": "Location",
},
"msg": {"type": "string", "title": "Message"},
"type": {"type": "string", "title": "Error Type"},
},
"type": "object",
"required": ["loc", "msg", "type"],
"title": "ValidationError",
},
}
},
}
)
| Item |
python | sphinx-doc__sphinx | sphinx/util/docutils.py | {
"start": 24315,
"end": 30759
} | class ____(nodes.NodeVisitor):
"""A base class for Sphinx translators.
This class adds a support for visitor/departure method for super node class
if visitor/departure method for node class is not found.
It also provides helper methods for Sphinx translators.
.. versionadded:: 2.0
.. note:: The subclasses of this class might not work with docutils.
This class is strongly coupled with Sphinx.
"""
def __init__(self, document: nodes.document, builder: Builder) -> None:
super().__init__(document)
self.builder = builder
self.config = builder.config
self.settings = document.settings
self._domains = builder.env.domains
def dispatch_visit(self, node: Node) -> None:
"""Dispatch node to appropriate visitor method.
The priority of visitor method is:
1. ``self.visit_{node_class}()``
2. ``self.visit_{super_node_class}()``
3. ``self.unknown_visit()``
"""
for node_class in node.__class__.__mro__:
method = getattr(self, 'visit_%s' % node_class.__name__, None)
if method:
method(node)
break
else:
super().dispatch_visit(node)
def dispatch_departure(self, node: Node) -> None:
"""Dispatch node to appropriate departure method.
The priority of departure method is:
1. ``self.depart_{node_class}()``
2. ``self.depart_{super_node_class}()``
3. ``self.unknown_departure()``
"""
for node_class in node.__class__.__mro__:
method = getattr(self, 'depart_%s' % node_class.__name__, None)
if method:
method(node)
break
else:
super().dispatch_departure(node)
def unknown_visit(self, node: Node) -> None:
logger.warning(__('unknown node type: %r'), node, location=node)
# cache a vanilla instance of nodes.document
# Used in new_document() function
__document_cache__: tuple[Values, Reporter]
def new_document(source_path: str, settings: Any = None) -> nodes.document:
"""Return a new empty document object. This is an alternative of docutils'.
This is a simple wrapper for ``docutils.utils.new_document()``. It
caches the result of docutils' and use it on second call for instantiation.
This makes an instantiation of document nodes much faster.
"""
global __document_cache__ # NoQA: PLW0603
try:
cached_settings, reporter = __document_cache__
except NameError:
doc = docutils.utils.new_document(source_path)
__document_cache__ = cached_settings, reporter = doc.settings, doc.reporter
if settings is None:
# Make a copy of the cached settings to accelerate instantiation
settings = copy(cached_settings)
# Create a new instance of nodes.document using cached reporter
document = nodes.document(settings, reporter, source=source_path)
document.note_source(source_path, -1)
return document
def _parse_str_to_doctree(
content: str,
*,
filename: Path,
default_role: str = '',
default_settings: Mapping[str, Any],
env: BuildEnvironment,
events: EventManager | None = None,
parser: Parser,
transforms: Sequence[type[Transform]] = (),
) -> nodes.document:
env.current_document._parser = parser
# Propagate exceptions by default when used programmatically:
defaults = {'traceback': True, **default_settings}
settings = _get_settings(
standalone.Reader, parser, defaults=defaults, read_config_files=True
)
settings._source = str(filename)
# Create root document node
reporter = LoggingReporter(
source=str(filename),
report_level=settings.report_level,
halt_level=settings.halt_level,
debug=settings.debug,
error_handler=settings.error_encoding_error_handler,
)
document = nodes.document(settings, reporter, source=str(filename))
document.note_source(str(filename), -1)
# substitute transformer
document.transformer = transformer = SphinxTransformer(document)
transformer.add_transforms(_READER_TRANSFORMS)
transformer.add_transforms(transforms)
transformer.add_transforms(parser.get_transforms())
if default_role:
default_role_cm = rst.default_role(env.current_document.docname, default_role)
else:
default_role_cm = nullcontext() # type: ignore[assignment]
with sphinx_domains(env), default_role_cm:
# TODO: Move the stanza below to Builder.read_doc(), within
# a sphinx_domains() context manager.
# This will require changes to IntersphinxDispatcher and/or
# CustomReSTDispatcher.
if events is not None:
# emit "source-read" event
arg = [content]
events.emit('source-read', env.current_document.docname, arg)
content = arg[0]
# parse content to abstract syntax tree
parser.parse(content, document)
document.current_source = document.current_line = None
# run transforms
transformer.apply_transforms()
return document
def _get_settings(
*components: Component | type[Component],
defaults: Mapping[str, Any],
read_config_files: bool = False,
) -> Values:
with warnings.catch_warnings(action='ignore', category=DeprecationWarning):
# DeprecationWarning: The frontend.OptionParser class will be replaced
# by a subclass of argparse.ArgumentParser in Docutils 0.21 or later.
# DeprecationWarning: The frontend.Option class will be removed
# in Docutils 0.21 or later.
option_parser = OptionParser(
components=components,
defaults=defaults,
read_config_files=read_config_files,
)
return option_parser.get_default_values() # type: ignore[return-value]
if docutils.__version_info__[:2] < (0, 22):
from docutils.parsers.rst.roles import set_classes
def _normalize_options(options: dict[str, Any] | None) -> dict[str, Any]:
if options is None:
return {}
n_options = options.copy()
set_classes(n_options)
return n_options
else:
from docutils.parsers.rst.roles import ( # type: ignore[attr-defined, no-redef]
normalize_options as _normalize_options, # NoQA: F401
)
| SphinxTranslator |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 17245,
"end": 18101
} | class ____(sgqlc.types.Enum):
"""The possible values for the enterprise base repository permission
setting.
Enumeration Choices:
* `ADMIN`: Organization members will be able to clone, pull, push,
and add new collaborators to all organization repositories.
* `NONE`: Organization members will only be able to clone and pull
public repositories.
* `NO_POLICY`: Organizations in the enterprise choose base
repository permissions for their members.
* `READ`: Organization members will be able to clone and pull all
organization repositories.
* `WRITE`: Organization members will be able to clone, pull, and
push all organization repositories.
"""
__schema__ = github_schema
__choices__ = ("ADMIN", "NONE", "NO_POLICY", "READ", "WRITE")
| EnterpriseDefaultRepositoryPermissionSettingValue |
python | pytorch__pytorch | test/test_dataloader.py | {
"start": 35437,
"end": 35713
} | class ____(Dataset):
def __init__(self, length):
self.length = length
def __getitem__(self, indices):
assert isinstance(indices, (list, tuple))
return torch.as_tensor(indices)
def __len__(self):
return self.length
| BulkLoadingDataset |
python | python-openxml__python-docx | src/docx/opc/constants.py | {
"start": 8549,
"end": 9087
} | class ____:
"""Constant values for OPC XML namespaces."""
DML_WORDPROCESSING_DRAWING = (
"http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing"
)
OFC_RELATIONSHIPS = "http://schemas.openxmlformats.org/officeDocument/2006/relationships"
OPC_RELATIONSHIPS = "http://schemas.openxmlformats.org/package/2006/relationships"
OPC_CONTENT_TYPES = "http://schemas.openxmlformats.org/package/2006/content-types"
WML_MAIN = "http://schemas.openxmlformats.org/wordprocessingml/2006/main"
| NAMESPACE |
python | django__django | tests/auth_tests/test_mixins.py | {
"start": 618,
"end": 726
} | class ____(View):
def get(self, request, *args, **kwargs):
return HttpResponse()
| EmptyResponseView |
python | scipy__scipy | scipy/integrate/_ivp/bdf.py | {
"start": 1944,
"end": 16821
} | class ____(OdeSolver):
"""Implicit method based on backward-differentiation formulas.
This is a variable order method with the order varying automatically from
1 to 5. The general framework of the BDF algorithm is described in [1]_.
This class implements a quasi-constant step size as explained in [2]_.
The error estimation strategy for the constant-step BDF is derived in [3]_.
An accuracy enhancement using modified formulas (NDF) [2]_ is also implemented.
Can be applied in the complex domain.
Parameters
----------
fun : callable
Right-hand side of the system: the time derivative of the state ``y``
at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a
scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must
return an array of the same shape as ``y``. See `vectorized` for more
information.
t0 : float
Initial time.
y0 : array_like, shape (n,)
Initial state.
t_bound : float
Boundary time - the integration won't continue beyond it. It also
determines the direction of the integration.
first_step : float or None, optional
Initial step size. Default is ``None`` which means that the algorithm
should choose.
max_step : float, optional
Maximum allowed step size. Default is np.inf, i.e., the step size is not
bounded and determined solely by the solver.
rtol, atol : float and array_like, optional
Relative and absolute tolerances. The solver keeps the local error
estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
relative accuracy (number of correct digits), while `atol` controls
absolute accuracy (number of correct decimal places). To achieve the
desired `rtol`, set `atol` to be smaller than the smallest value that
can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
allowable error. If `atol` is larger than ``rtol * abs(y)`` the
number of correct digits is not guaranteed. Conversely, to achieve the
desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
than `atol`. If components of y have different scales, it might be
beneficial to set different `atol` values for different components by
passing array_like with shape (n,) for `atol`. Default values are
1e-3 for `rtol` and 1e-6 for `atol`.
jac : {None, array_like, sparse_matrix, callable}, optional
Jacobian matrix of the right-hand side of the system with respect to y,
required by this method. The Jacobian matrix has shape (n, n) and its
element (i, j) is equal to ``d f_i / d y_j``.
There are three ways to define the Jacobian:
* If array_like or sparse_matrix, the Jacobian is assumed to
be constant.
* If callable, the Jacobian is assumed to depend on both
t and y; it will be called as ``jac(t, y)`` as necessary.
For the 'Radau' and 'BDF' methods, the return value might be a
sparse matrix.
* If None (default), the Jacobian will be approximated by
finite differences.
It is generally recommended to provide the Jacobian rather than
relying on a finite-difference approximation.
jac_sparsity : {None, array_like, sparse matrix}, optional
Defines a sparsity structure of the Jacobian matrix for a
finite-difference approximation. Its shape must be (n, n). This argument
is ignored if `jac` is not `None`. If the Jacobian has only few non-zero
elements in *each* row, providing the sparsity structure will greatly
speed up the computations [4]_. A zero entry means that a corresponding
element in the Jacobian is always zero. If None (default), the Jacobian
is assumed to be dense.
vectorized : bool, optional
Whether `fun` can be called in a vectorized fashion. Default is False.
If ``vectorized`` is False, `fun` will always be called with ``y`` of
shape ``(n,)``, where ``n = len(y0)``.
If ``vectorized`` is True, `fun` may be called with ``y`` of shape
``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave
such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of
the returned array is the time derivative of the state corresponding
with a column of ``y``).
Setting ``vectorized=True`` allows for faster finite difference
approximation of the Jacobian by this method, but may result in slower
execution overall in some circumstances (e.g. small ``len(y0)``).
Attributes
----------
n : int
Number of equations.
status : string
Current status of the solver: 'running', 'finished' or 'failed'.
t_bound : float
Boundary time.
direction : float
Integration direction: +1 or -1.
t : float
Current time.
y : ndarray
Current state.
t_old : float
Previous time. None if no steps were made yet.
step_size : float
Size of the last successful step. None if no steps were made yet.
nfev : int
Number of evaluations of the right-hand side.
njev : int
Number of evaluations of the Jacobian.
nlu : int
Number of LU decompositions.
References
----------
.. [1] G. D. Byrne, A. C. Hindmarsh, "A Polyalgorithm for the Numerical
Solution of Ordinary Differential Equations", ACM Transactions on
Mathematical Software, Vol. 1, No. 1, pp. 71-96, March 1975.
.. [2] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI.
COMPUTE., Vol. 18, No. 1, pp. 1-22, January 1997.
.. [3] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations I:
Nonstiff Problems", Sec. III.2.
.. [4] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of Mathematics
and its Applications, 13, pp. 117-120, 1974.
"""
def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None,
vectorized=False, first_step=None, **extraneous):
warn_extraneous(extraneous)
super().__init__(fun, t0, y0, t_bound, vectorized,
support_complex=True)
self.max_step = validate_max_step(max_step)
self.rtol, self.atol = validate_tol(rtol, atol, self.n)
f = self.fun(self.t, self.y)
if first_step is None:
self.h_abs = select_initial_step(self.fun, self.t, self.y,
t_bound, max_step, f,
self.direction, 1,
self.rtol, self.atol)
else:
self.h_abs = validate_first_step(first_step, t0, t_bound)
self.h_abs_old = None
self.error_norm_old = None
self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5))
self.jac_factor = None
self.jac, self.J = self._validate_jac(jac, jac_sparsity)
if issparse(self.J):
def lu(A):
self.nlu += 1
return splu(A)
def solve_lu(LU, b):
return LU.solve(b)
I = eye(self.n, format='csc', dtype=self.y.dtype)
else:
def lu(A):
self.nlu += 1
return lu_factor(A, overwrite_a=True)
def solve_lu(LU, b):
return lu_solve(LU, b, overwrite_b=True)
I = np.identity(self.n, dtype=self.y.dtype)
self.lu = lu
self.solve_lu = solve_lu
self.I = I
kappa = np.array([0, -0.1850, -1/9, -0.0823, -0.0415, 0])
self.gamma = np.hstack((0, np.cumsum(1 / np.arange(1, MAX_ORDER + 1))))
self.alpha = (1 - kappa) * self.gamma
self.error_const = kappa * self.gamma + 1 / np.arange(1, MAX_ORDER + 2)
D = np.empty((MAX_ORDER + 3, self.n), dtype=self.y.dtype)
D[0] = self.y
D[1] = f * self.h_abs * self.direction
self.D = D
self.order = 1
self.n_equal_steps = 0
self.LU = None
def _validate_jac(self, jac, sparsity):
t0 = self.t
y0 = self.y
if jac is None:
if sparsity is not None:
if issparse(sparsity):
sparsity = csc_matrix(sparsity)
groups = group_columns(sparsity)
sparsity = (sparsity, groups)
def jac_wrapped(t, y):
self.njev += 1
f = self.fun_single(t, y)
J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f,
self.atol, self.jac_factor,
sparsity)
return J
J = jac_wrapped(t0, y0)
elif callable(jac):
J = jac(t0, y0)
self.njev += 1
if issparse(J):
J = csc_matrix(J, dtype=y0.dtype)
def jac_wrapped(t, y):
self.njev += 1
return csc_matrix(jac(t, y), dtype=y0.dtype)
else:
J = np.asarray(J, dtype=y0.dtype)
def jac_wrapped(t, y):
self.njev += 1
return np.asarray(jac(t, y), dtype=y0.dtype)
if J.shape != (self.n, self.n):
raise ValueError(f"`jac` is expected to have shape {(self.n, self.n)},"
f" but actually has {J.shape}.")
else:
if issparse(jac):
J = csc_matrix(jac, dtype=y0.dtype)
else:
J = np.asarray(jac, dtype=y0.dtype)
if J.shape != (self.n, self.n):
raise ValueError(f"`jac` is expected to have shape {(self.n, self.n)},"
f" but actually has {J.shape}.")
jac_wrapped = None
return jac_wrapped, J
def _step_impl(self):
t = self.t
D = self.D
max_step = self.max_step
min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
if self.h_abs > max_step:
h_abs = max_step
change_D(D, self.order, max_step / self.h_abs)
self.n_equal_steps = 0
elif self.h_abs < min_step:
h_abs = min_step
change_D(D, self.order, min_step / self.h_abs)
self.n_equal_steps = 0
else:
h_abs = self.h_abs
atol = self.atol
rtol = self.rtol
order = self.order
alpha = self.alpha
gamma = self.gamma
error_const = self.error_const
J = self.J
LU = self.LU
current_jac = self.jac is None
step_accepted = False
while not step_accepted:
if h_abs < min_step:
return False, self.TOO_SMALL_STEP
h = h_abs * self.direction
t_new = t + h
if self.direction * (t_new - self.t_bound) > 0:
t_new = self.t_bound
change_D(D, order, np.abs(t_new - t) / h_abs)
self.n_equal_steps = 0
LU = None
h = t_new - t
h_abs = np.abs(h)
y_predict = np.sum(D[:order + 1], axis=0)
scale = atol + rtol * np.abs(y_predict)
psi = np.dot(D[1: order + 1].T, gamma[1: order + 1]) / alpha[order]
converged = False
c = h / alpha[order]
while not converged:
if LU is None:
LU = self.lu(self.I - c * J)
converged, n_iter, y_new, d = solve_bdf_system(
self.fun, t_new, y_predict, c, psi, LU, self.solve_lu,
scale, self.newton_tol)
if not converged:
if current_jac:
break
J = self.jac(t_new, y_predict)
LU = None
current_jac = True
if not converged:
factor = 0.5
h_abs *= factor
change_D(D, order, factor)
self.n_equal_steps = 0
LU = None
continue
safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER
+ n_iter)
scale = atol + rtol * np.abs(y_new)
error = error_const[order] * d
error_norm = norm(error / scale)
if error_norm > 1:
factor = max(MIN_FACTOR,
safety * error_norm ** (-1 / (order + 1)))
h_abs *= factor
change_D(D, order, factor)
self.n_equal_steps = 0
# As we didn't have problems with convergence, we don't
# reset LU here.
else:
step_accepted = True
self.n_equal_steps += 1
self.t = t_new
self.y = y_new
self.h_abs = h_abs
self.J = J
self.LU = LU
# Update differences. The principal relation here is
# D^{j + 1} y_n = D^{j} y_n - D^{j} y_{n - 1}. Keep in mind that D
# contained difference for previous interpolating polynomial and
# d = D^{k + 1} y_n. Thus this elegant code follows.
D[order + 2] = d - D[order + 1]
D[order + 1] = d
for i in reversed(range(order + 1)):
D[i] += D[i + 1]
if self.n_equal_steps < order + 1:
return True, None
if order > 1:
error_m = error_const[order - 1] * D[order]
error_m_norm = norm(error_m / scale)
else:
error_m_norm = np.inf
if order < MAX_ORDER:
error_p = error_const[order + 1] * D[order + 2]
error_p_norm = norm(error_p / scale)
else:
error_p_norm = np.inf
error_norms = np.array([error_m_norm, error_norm, error_p_norm])
with np.errstate(divide='ignore'):
factors = error_norms ** (-1 / np.arange(order, order + 3))
delta_order = np.argmax(factors) - 1
order += delta_order
self.order = order
factor = min(MAX_FACTOR, safety * np.max(factors))
self.h_abs *= factor
change_D(D, order, factor)
self.n_equal_steps = 0
self.LU = None
return True, None
def _dense_output_impl(self):
return BdfDenseOutput(self.t_old, self.t, self.h_abs * self.direction,
self.order, self.D[:self.order + 1].copy())
| BDF |
python | pytorch__pytorch | torch/_dynamo/create_parameter_op.py | {
"start": 840,
"end": 2561
} | class ____(torch.autograd.Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(ctx: Any, tensor: Any, placeholder: Any) -> torch.nn.Parameter:
assert not tensor.requires_grad
return placeholder.set_(tensor)
@staticmethod
def backward(ctx: Any, *grad_outputs: torch.Tensor) -> tuple[None, torch.Tensor]:
grad = grad_outputs[0]
return None, grad # grad flows to placeholder
def tracable_create_parameter(
tensor: torch.Tensor, placeholder: torch.nn.Parameter
) -> torch.nn.Parameter:
with torch.set_grad_enabled(placeholder.requires_grad):
out = TracableCreateParameter.apply(tensor, placeholder)
return out
def new_parameter_placeholder(
size: tuple[int, ...], dtype: torch.dtype, device: torch.device, requires_grad: bool
) -> torch.nn.Parameter:
"""Create a placeholder to be passed to the above functions"""
result = torch.nn.Parameter(
torch.empty(size, dtype=dtype, device=device), requires_grad=requires_grad
)
# TODO(jansel): alloc followed by free is inefficient, need a way to allocate an unbacked tensor.
# Allocating a zero tensor would causes assert failures in autograd.
result.untyped_storage().resize_(0)
return result
_TLS = threading.local()
@contextmanager
def do_not_convert_to_tracable_parameter() -> Generator[bool, None, None]:
old_flag = getattr(_TLS, "convert_tracable_parameter", True)
_TLS.convert_tracable_parameter = False
try:
yield False
finally:
_TLS.convert_tracable_parameter = old_flag
def can_convert_to_tracable_parameter() -> bool:
return getattr(_TLS, "convert_tracable_parameter", True)
| TracableCreateParameter |
python | huggingface__transformers | tests/models/esm/test_modeling_esm.py | {
"start": 13225,
"end": 15295
} | class ____(TestCasePlus):
def test_inference_masked_lm(self):
with torch.no_grad():
model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
model.eval()
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
output = model(input_ids)[0]
vocab_size = 33
expected_shape = torch.Size((1, 6, vocab_size))
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
)
torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
def test_inference_no_head(self):
with torch.no_grad():
model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
model.eval()
input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
output = model(input_ids)[0]
# compare the actual values for a slice.
expected_slice = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
)
torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@require_bitsandbytes
def test_inference_bitsandbytes(self):
model = EsmForMaskedLM.from_pretrained(
"facebook/esm2_t36_3B_UR50D", quantization_config=BitsAndBytesConfig(load_in_8bit=True)
)
input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]]).to(model.device)
# Just test if inference works
with torch.no_grad():
_ = model(input_ids)[0]
model = EsmForMaskedLM.from_pretrained(
"facebook/esm2_t36_3B_UR50D", quantization_config=BitsAndBytesConfig(load_in_4bit=True)
)
input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]]).to(model.device)
# Just test if inference works
_ = model(input_ids)[0]
| EsmModelIntegrationTest |
python | joke2k__faker | tests/providers/test_bank.py | {
"start": 6767,
"end": 7434
} | class ____:
"""Test ru_RU bank provider"""
def test_bic(self, faker, num_samples):
for _ in range(num_samples):
assert re.match(r"04\d{7,9}", faker.bic())
def test_correspondent_account(self, faker, num_samples):
for _ in range(num_samples):
assert re.match(r"301\d{17}", faker.correspondent_account())
def test_checking_account(self, faker, num_samples):
for _ in range(num_samples):
assert re.match(r"\d{3}0\d{16}", faker.checking_account())
def test_bank(self, faker, num_samples):
for _ in range(num_samples):
assert re.match(r"\D{3,41}", faker.bank())
| TestRuRu |
python | getsentry__sentry | src/sentry/api/endpoints/builtin_symbol_sources.py | {
"start": 548,
"end": 1028
} | class ____(Endpoint):
owner = ApiOwner.OWNERS_INGEST
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
permission_classes = ()
def get(self, request: Request, **kwargs) -> Response:
sources = [
normalize_symbol_source(key, source)
for key, source in settings.SENTRY_BUILTIN_SOURCES.items()
]
sources.sort(key=lambda s: s["name"])
return Response(serialize(sources))
| BuiltinSymbolSourcesEndpoint |
python | ApeWorX__ape | tests/functional/conftest.py | {
"start": 6057,
"end": 11658
} | class ____(ContractLogicError):
pass
@pytest.hookimpl(trylast=True, hookwrapper=True)
def pytest_collection_finish(session):
with ape.networks.parse_network_choice("::test"):
# Sets the active provider
yield
@pytest.fixture
def mock_web3(mocker):
return mocker.MagicMock()
@pytest.fixture
def mock_transaction(mocker):
tx = mocker.MagicMock()
tx.required_confirmations = 0
return tx
@pytest.fixture(scope="session")
def address():
return TEST_ADDRESS
@pytest.fixture
def second_keyfile_account(sender, keyparams, temp_keyfile_account_ctx):
with temp_keyfile_account_ctx(ALIAS_2, keyparams, sender) as account:
# Ensure starts off locked.
account.lock()
yield account
@pytest.fixture
def solidity_contract_instance(owner, project, networks_connected_to_tester) -> "ContractInstance":
return owner.deploy(project.SolidityContract, 0)
@pytest.fixture
def vyper_contract_instance(owner, project, networks_connected_to_tester) -> "ContractInstance":
return owner.deploy(project.VyperContract, 0, required_confirmations=0)
@pytest.fixture
def solidity_fallback_contract(owner, project):
return owner.deploy(project.SolFallbackAndReceive)
@pytest.fixture
def vyper_fallback_contract(owner, project):
return owner.deploy(project.VyDefault)
@pytest.fixture
def reverts_contract_instance(
owner, project, sub_reverts_contract_instance, eth_tester_provider
) -> "ContractInstance":
return owner.deploy(
project.RevertsContract, sub_reverts_contract_instance, required_confirmations=0
)
@pytest.fixture(params=("solidity", "vyper"))
def contract_container(request, project, networks_connected_to_tester):
return project.SolidityContract if request.param == "solidity" else project.VyperContract
@pytest.fixture(params=("solidity", "vyper"))
def contract_instance(
eth_tester_provider, request, solidity_contract_instance, vyper_contract_instance
):
return solidity_contract_instance if request.param == "solidity" else vyper_contract_instance
@pytest.fixture(params=("solidity", "vyper"))
def fallback_contract(
eth_tester_provider, request, solidity_fallback_contract, vyper_fallback_contract
):
return solidity_fallback_contract if request.param == "solidity" else vyper_fallback_contract
@pytest.fixture
def ds_note_test_contract(project, eth_tester_provider, owner):
return project.DSNoteTest.deploy(sender=owner)
@pytest.fixture(scope="session")
def project_with_contract():
with ape.Project(APE_PROJECT_FOLDER).isolate_in_tempdir() as project:
yield project
@pytest.fixture
def clean_contract_caches(chain):
with chain.contracts.use_temporary_caches():
yield
@pytest.fixture
def project_with_dependency_config(empty_project):
dependencies_config = {
"contracts_folder": "functional/data/contracts/local",
"dependencies": [
{
"local": str(PROJECT_WITH_LONG_CONTRACTS_FOLDER),
"name": "testdependency",
"config_override": {
"contracts_folder": "source/v0.1",
},
"version": "releases/v6", # Testing having a slash in version.
}
],
}
empty_project.clean()
# NOTE: Use empty project because it is faster to compile.
with empty_project.isolate_in_tempdir(**dependencies_config) as tmp_project:
yield tmp_project
@pytest.fixture(scope="session")
def base_projects_directory():
return BASE_PROJECTS_DIRECTORY
@pytest.fixture(scope="session")
def mainnet_contract(chain):
def contract_getter(address):
path = (
Path(__file__).parent
/ "data"
/ "contracts"
/ "ethereum"
/ "mainnet"
/ f"{address}.json"
)
contract = ContractType.model_validate_json(path.read_text())
chain.contracts[address] = contract
return contract
return contract_getter
@pytest.fixture(scope="session")
def ds_note():
return {
"address": "0x35D1b3F3D7966A1DFe207aa4514C12a259A0492B",
"topics": [
HexBytes("0x7608870300000000000000000000000000000000000000000000000000000000"),
HexBytes("0x5946492d41000000000000000000000000000000000000000000000000000000"),
HexBytes("0x0000000000000000000000000abb839063ef747c8432b2acc60bf8f70ec09a45"),
HexBytes("0x0000000000000000000000000abb839063ef747c8432b2acc60bf8f70ec09a45"),
],
"data": "0x000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000e0760887035946492d410000000000000000000000000000000000000000000000000000000000000000000000000000000abb839063ef747c8432b2acc60bf8f70ec09a450000000000000000000000000abb839063ef747c8432b2acc60bf8f70ec09a450000000000000000000000000abb839063ef747c8432b2acc60bf8f70ec09a450000000000000000000000000000000000000000000000000000000000000000fffffffffffffffffffffffffffffffffffffffffffa050e82a57b7fc6b6020c00000000000000000000000000000000000000000000000000000000",
"blockNumber": 14623434,
"transactionHash": HexBytes(
"0xa322a9fd0e627e22bfe1b0877cca1d1f2e697d076007231d0b7a366d1a0fdd51"
),
"transactionIndex": 333,
"blockHash": HexBytes("0x0fd77b0af3fa471aa040a02d4fcd1ec0a35122a4166d0bb7c31354e23823de49"),
"logIndex": 376,
"removed": False,
}
@pytest.fixture
def chain_that_mined_5(chain):
chain.mine(5)
return chain
| _ContractLogicError |
python | django__django | django/db/models/fields/__init__.py | {
"start": 96574,
"end": 98059
} | class ____:
db_returning = True
def __init__(self, *args, **kwargs):
kwargs["blank"] = True
super().__init__(*args, **kwargs)
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_primary_key(),
]
def _check_primary_key(self):
if not self.primary_key:
return [
checks.Error(
"AutoFields must set primary_key=True.",
obj=self,
id="fields.E100",
),
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs["blank"]
kwargs["primary_key"] = True
return name, path, args, kwargs
def validate(self, value, model_instance):
pass
def get_db_prep_value(self, value, connection, prepared=False):
if not prepared:
value = self.get_prep_value(value)
value = connection.ops.validate_autopk_value(value)
return value
def contribute_to_class(self, cls, name, **kwargs):
if cls._meta.auto_field:
raise ValueError(
"Model %s can't have more than one auto-generated field."
% cls._meta.label
)
super().contribute_to_class(cls, name, **kwargs)
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
| AutoFieldMixin |
python | ansible__ansible | lib/ansible/modules/hostname.py | {
"start": 20594,
"end": 22222
} | class ____(object):
"""
This is a generic Hostname manipulation class that is subclassed
based on platform.
A subclass may wish to set different strategy instance to self.strategy.
All subclasses MUST define platform and distribution (which may be None).
"""
platform = 'Generic'
distribution = None # type: str | None
strategy_class = UnimplementedStrategy # type: t.Type[BaseStrategy]
def __new__(cls, *args, **kwargs):
new_cls = get_platform_subclass(Hostname)
return super(cls, new_cls).__new__(new_cls)
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.use = module.params['use']
if self.use is not None:
strategy = globals()['%sStrategy' % STRATS[self.use]]
self.strategy = strategy(module)
elif platform.system() == 'Linux' and ServiceMgrFactCollector.is_systemd_managed(module):
# This is Linux and systemd is active
self.strategy = SystemdStrategy(module)
else:
self.strategy = self.strategy_class(module)
def update_current_and_permanent_hostname(self):
return self.strategy.update_current_and_permanent_hostname()
def get_current_hostname(self):
return self.strategy.get_current_hostname()
def set_current_hostname(self, name):
self.strategy.set_current_hostname(name)
def get_permanent_hostname(self):
return self.strategy.get_permanent_hostname()
def set_permanent_hostname(self, name):
self.strategy.set_permanent_hostname(name)
| Hostname |
python | getsentry__sentry | src/sentry/search/events/builder/profiles.py | {
"start": 1163,
"end": 1279
} | class ____(ProfilesQueryBuilderMixin, BaseQueryBuilder):
config_class = ProfilesDatasetConfig
| ProfilesQueryBuilder |
python | pandas-dev__pandas | pandas/tests/extension/base/accumulate.py | {
"start": 66,
"end": 1501
} | class ____:
"""
Accumulation specific tests. Generally these only
make sense for numeric/boolean operations.
"""
def _supports_accumulation(self, ser: pd.Series, op_name: str) -> bool:
# Do we expect this accumulation to be supported for this dtype?
# We default to assuming "no"; subclass authors should override here.
return False
def check_accumulate(self, ser: pd.Series, op_name: str, skipna: bool):
try:
alt = ser.astype("float64")
except (TypeError, ValueError):
# e.g. Period can't be cast to float64 (TypeError)
# String can't be cast to float64 (ValueError)
alt = ser.astype(object)
result = getattr(ser, op_name)(skipna=skipna)
expected = getattr(alt, op_name)(skipna=skipna)
tm.assert_series_equal(result, expected, check_dtype=False)
@pytest.mark.parametrize("skipna", [True, False])
def test_accumulate_series(self, data, all_numeric_accumulations, skipna):
op_name = all_numeric_accumulations
ser = pd.Series(data)
if self._supports_accumulation(ser, op_name):
self.check_accumulate(ser, op_name, skipna)
else:
with pytest.raises((NotImplementedError, TypeError)):
# TODO: require TypeError for things that will _never_ work?
getattr(ser, op_name)(skipna=skipna)
| BaseAccumulateTests |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/property17.py | {
"start": 602,
"end": 696
} | class ____(RootMixin[T]):
root: Model[T]
def func1(s: Root[str]):
x: Proto[str] = s
| Root |
python | tensorflow__tensorflow | tensorflow/compiler/tests/qr_op_test.py | {
"start": 1395,
"end": 5745
} | class ____(xla_test.XLATestCase, parameterized.TestCase):
def AdjustedNorm(self, x):
"""Computes the norm of matrices in 'x', adjusted for dimension and type."""
norm = np.linalg.norm(x, axis=(-2, -1))
return norm / (max(x.shape[-2:]) * np.finfo(x.dtype).eps)
def CompareOrthogonal(self, x, y, rank):
# We only compare the first 'rank' orthogonal vectors since the
# remainder form an arbitrary orthonormal basis for the
# (row- or column-) null space, whose exact value depends on
# implementation details. Notice that since we check that the
# matrices of singular vectors are unitary elsewhere, we do
# implicitly test that the trailing vectors of x and y span the
# same space.
x = x[..., 0:rank]
y = y[..., 0:rank]
# Q is only unique up to sign (complex phase factor for complex matrices),
# so we normalize the sign first.
sum_of_ratios = np.sum(np.divide(y, x), -2, keepdims=True)
phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))
x *= phases
self.assertTrue(np.all(self.AdjustedNorm(x - y) < 30.0))
def CheckApproximation(self, a, q, r):
# Tests that a ~= q*r.
precision = self.AdjustedNorm(a - np.matmul(q, r))
self.assertTrue(np.all(precision < 11.0))
def CheckUnitary(self, x):
# Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
xx = math_ops.matmul(x, x, adjoint_a=True)
identity = array_ops.matrix_band_part(array_ops.ones_like(xx), 0, 0)
tol = 100 * np.finfo(x.dtype).eps
self.assertAllClose(xx, identity, atol=tol)
def _random_matrix(self, dtype, shape):
np.random.seed(1)
def rng():
return np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape)).reshape(shape).astype(dtype)
x_np = rng()
if np.issubdtype(dtype, np.complexfloating):
x_np += rng() * dtype(1j)
return x_np
def _test(self, x_np, full_matrices, full_rank=True):
dtype = x_np.dtype
shape = x_np.shape
with self.session() as sess:
x_tf = array_ops.placeholder(dtype)
with self.device_scope():
q_tf, r_tf = linalg_ops.qr(x_tf, full_matrices=full_matrices)
q_tf_val, r_tf_val = sess.run([q_tf, r_tf], feed_dict={x_tf: x_np})
q_dims = q_tf_val.shape
np_q = np.ndarray(q_dims, dtype)
np_q_reshape = np.reshape(np_q, (-1, q_dims[-2], q_dims[-1]))
new_first_dim = np_q_reshape.shape[0]
x_reshape = np.reshape(x_np, (-1, x_np.shape[-2], x_np.shape[-1]))
for i in range(new_first_dim):
if full_matrices:
np_q_reshape[i, :, :], _ = np.linalg.qr(
x_reshape[i, :, :], mode="complete")
else:
np_q_reshape[i, :, :], _ = np.linalg.qr(
x_reshape[i, :, :], mode="reduced")
np_q = np.reshape(np_q_reshape, q_dims)
if full_rank:
# Q is unique up to sign/phase if the matrix is full-rank.
self.CompareOrthogonal(np_q, q_tf_val, min(shape[-2:]))
self.CheckApproximation(x_np, q_tf_val, r_tf_val)
self.CheckUnitary(q_tf_val)
SIZES = [1, 2, 5, 10, 32, 100, 300, 603]
DTYPES = [np.float32, np.complex64]
PARAMS = itertools.product(SIZES, SIZES, DTYPES)
@parameterized.parameters(*PARAMS)
def testQR(self, rows, cols, dtype):
for full_matrices in [True, False]:
# Only tests the (3, 2) case for small numbers of rows/columns.
for batch_dims in [(), (3,)] + [(3, 2)] * (max(rows, cols) < 10):
x_np = self._random_matrix(dtype, batch_dims + (rows, cols))
self._test(x_np, full_matrices)
def testLarge2000x2000(self):
x_np = self._random_matrix(np.float32, (2000, 2000))
self._test(x_np, full_matrices=True)
@unittest.skip("Test times out on CI")
def testLarge17500x128(self):
x_np = self._random_matrix(np.float32, (17500, 128))
self._test(x_np, full_matrices=True)
@parameterized.parameters((23, 25), (513, 23))
def testZeroColumn(self, rows, cols):
x_np = self._random_matrix(np.complex64, (rows, cols))
x_np[:, 7] = 0.
self._test(x_np, full_matrices=True)
@parameterized.parameters((4, 4), (514, 20))
def testRepeatedColumn(self, rows, cols):
x_np = self._random_matrix(np.complex64, (rows, cols))
x_np[:, 1] = x_np[:, 2]
self._test(x_np, full_matrices=True, full_rank=False)
if __name__ == "__main__":
test.main()
| QrOpTest |
python | getsentry__sentry | src/sentry/notifications/platform/types.py | {
"start": 913,
"end": 1199
} | class ____(StrEnum):
"""
The unique keys for each registered notification provider.
"""
EMAIL = ExternalProviderEnum.EMAIL
SLACK = ExternalProviderEnum.SLACK
MSTEAMS = ExternalProviderEnum.MSTEAMS
DISCORD = ExternalProviderEnum.DISCORD
| NotificationProviderKey |
python | Netflix__metaflow | metaflow/tracing/propagator.py | {
"start": 941,
"end": 2502
} | class ____(TextMapPropagator):
def __init__(self, formatter):
if formatter is None:
self.formatter = TraceContextTextMapPropagator()
else:
self.formatter = formatter
# delegating to extract function implementation of the formatter
def extract(
self,
carrier: CarrierT,
context: typing.Optional[Context] = None,
getter: Getter = DefaultGetter(),
) -> Context:
return self.formatter.extract(carrier=carrier, context=context, getter=getter)
# delegating to inject function implementation of the formatter
def inject(
self,
carrier: CarrierT,
context: typing.Optional[Context] = None,
setter: Setter = DefaultSetter(),
) -> None:
self.formatter.inject(carrier=carrier, context=context, setter=setter)
# function for the user to inject trace details or baggage
def inject_to_carrier(self, context: typing.Optional[Context] = None):
env_dict = os.environ.copy()
self.inject(carrier=env_dict, context=context, setter=DefaultSetter())
return env_dict
# function for the user to extract trace context or baggage
def extract_context(self) -> Context:
if self.formatter is None:
self.formatter = TraceContextTextMapPropagator()
return self.extract(carrier=os.environ, getter=DefaultGetter())
@property
def fields(self) -> typing.Set[str]:
# Returns a set with the fields set in `inject`.
return self.formatter.fields
| EnvPropagator |
python | huggingface__transformers | src/transformers/models/convnext/modeling_convnext.py | {
"start": 9574,
"end": 10220
} | class ____(PreTrainedModel):
config: ConvNextConfig
base_model_prefix = "convnext"
main_input_name = "pixel_values"
input_modalities = ("image",)
_no_split_modules = ["ConvNextLayer"]
_can_record_outputs = {} # hidden states are collected explicitly
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, ConvNextLayer):
if module.layer_scale_parameter is not None:
init.constant_(module.layer_scale_parameter, self.config.layer_scale_init_value)
@auto_docstring
| ConvNextPreTrainedModel |
python | mlflow__mlflow | mlflow/gateway/providers/bedrock.py | {
"start": 4810,
"end": 5537
} | class ____(Enum):
AMAZON = "amazon"
COHERE = "cohere"
AI21 = "ai21"
ANTHROPIC = "anthropic"
@property
def adapter_class(self) -> type[ProviderAdapter]:
return AWS_MODEL_PROVIDER_TO_ADAPTER.get(self)
@classmethod
def of_str(cls, name: str):
name = name.lower()
for opt in cls:
if opt.name.lower() in name or opt.value.lower() in name:
return opt
AWS_MODEL_PROVIDER_TO_ADAPTER = {
AmazonBedrockModelProvider.COHERE: CohereAdapter,
AmazonBedrockModelProvider.ANTHROPIC: AmazonBedrockAnthropicAdapter,
AmazonBedrockModelProvider.AMAZON: AWSTitanAdapter,
AmazonBedrockModelProvider.AI21: AI21Adapter,
}
| AmazonBedrockModelProvider |
python | dagster-io__dagster | docs/sphinx/_ext/sphinx-click/tests/test_formatter.py | {
"start": 14857,
"end": 17029
} | class ____(unittest.TestCase):
"""Validate basic ``click.Group`` instances."""
maxDiff = None
def test_no_parameters(self):
"""Validate a `click.Group` with no parameters.
This exercises the code paths for a group with *no* arguments, *no*
options and *no* environment variables.
"""
@click.group()
def cli():
"""A sample command group."""
pass
ctx = click.Context(cli, info_name="cli")
output = list(ext._format_command(ctx, nested="short")) # noqa
self.assertEqual(
textwrap.dedent(
"""
A sample command group.
.. program:: cli
.. code-block:: shell
cli [OPTIONS] COMMAND [ARGS]...
"""
).lstrip(),
"\n".join(output),
)
def test_basic_parameters(self):
"""Validate a combination of parameters.
This exercises the code paths for a group with arguments, options and
environment variables.
"""
@click.group()
@click.option("--param", envvar="PARAM", help="A sample option")
@click.argument("ARG", envvar="ARG")
def cli():
"""A sample command group."""
pass
ctx = click.Context(cli, info_name="cli")
output = list(ext._format_command(ctx, nested="short")) # noqa
self.assertEqual(
textwrap.dedent(
"""
A sample command group.
.. program:: cli
.. code-block:: shell
cli [OPTIONS] ARG COMMAND [ARGS]...
.. rubric:: Options
.. option:: --param <param>
A sample option
.. rubric:: Arguments
.. option:: ARG
Required argument
.. rubric:: Environment variables
.. _cli-param-PARAM:
.. envvar:: PARAM
:noindex:
Provide a default for :option:`--param`
.. _cli-arg-ARG:
.. envvar:: ARG
:noindex:
Provide a default for :option:`ARG`
"""
).lstrip(),
"\n".join(output),
)
| GroupTestCase |
python | getsentry__sentry | src/sentry/seer/sentry_data_models.py | {
"start": 908,
"end": 1061
} | class ____(BaseModel):
trace_id: str
project_id: int
transaction_name: str
total_spans: int
spans: list[EvidenceSpan]
| EvidenceTraceData |
python | jina-ai__jina | jina/enums.py | {
"start": 7422,
"end": 7930
} | class ____(BetterEnum):
"""Provider type."""
NONE = 0 #: no provider
SAGEMAKER = 1 #: AWS SageMaker
AZURE = 2 #: AZURE
def replace_enum_to_str(obj):
"""
Transform BetterEnum type into string.
:param obj: Target obj.
:return: Transformed obj with string type values.
"""
for k, v in obj.items():
if isinstance(v, dict):
obj[k] = replace_enum_to_str(v)
elif isinstance(v, BetterEnum):
obj[k] = str(v)
return obj
| ProviderType |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_unicode_ddl.py | {
"start": 600,
"end": 6109
} | class ____(fixtures.TablesTest):
__requires__ = ("unicode_ddl",)
__backend__ = True
@classmethod
def define_tables(cls, metadata):
global t1, t2, t3
t1 = Table(
"unitable1",
metadata,
Column("méil", Integer, primary_key=True),
Column("\u6e2c\u8a66", Integer),
test_needs_fk=True,
)
t2 = Table(
"Unitéble2",
metadata,
Column("méil", Integer, primary_key=True, key="a"),
Column(
"\u6e2c\u8a66",
Integer,
ForeignKey("unitable1.méil"),
key="b",
),
test_needs_fk=True,
)
# Few DBs support Unicode foreign keys
if testing.against("sqlite"):
t3 = Table(
"\u6e2c\u8a66",
metadata,
Column(
"\u6e2c\u8a66_id",
Integer,
primary_key=True,
autoincrement=False,
),
Column(
"unitable1_\u6e2c\u8a66",
Integer,
ForeignKey("unitable1.\u6e2c\u8a66"),
),
Column("Unitéble2_b", Integer, ForeignKey("Unitéble2.b")),
Column(
"\u6e2c\u8a66_self",
Integer,
ForeignKey("\u6e2c\u8a66.\u6e2c\u8a66_id"),
),
test_needs_fk=True,
)
else:
t3 = Table(
"\u6e2c\u8a66",
metadata,
Column(
"\u6e2c\u8a66_id",
Integer,
primary_key=True,
autoincrement=False,
),
Column("unitable1_\u6e2c\u8a66", Integer),
Column("Unitéble2_b", Integer),
Column("\u6e2c\u8a66_self", Integer),
test_needs_fk=True,
)
def test_insert(self, connection):
connection.execute(t1.insert(), {"méil": 1, "\u6e2c\u8a66": 5})
connection.execute(t2.insert(), {"a": 1, "b": 1})
connection.execute(
t3.insert(),
{
"\u6e2c\u8a66_id": 1,
"unitable1_\u6e2c\u8a66": 5,
"Unitéble2_b": 1,
"\u6e2c\u8a66_self": 1,
},
)
eq_(connection.execute(t1.select()).fetchall(), [(1, 5)])
eq_(connection.execute(t2.select()).fetchall(), [(1, 1)])
eq_(connection.execute(t3.select()).fetchall(), [(1, 5, 1, 1)])
def test_col_targeting(self, connection):
connection.execute(t1.insert(), {"méil": 1, "\u6e2c\u8a66": 5})
connection.execute(t2.insert(), {"a": 1, "b": 1})
connection.execute(
t3.insert(),
{
"\u6e2c\u8a66_id": 1,
"unitable1_\u6e2c\u8a66": 5,
"Unitéble2_b": 1,
"\u6e2c\u8a66_self": 1,
},
)
row = connection.execute(t1.select()).first()
eq_(row._mapping[t1.c["méil"]], 1)
eq_(row._mapping[t1.c["\u6e2c\u8a66"]], 5)
row = connection.execute(t2.select()).first()
eq_(row._mapping[t2.c["a"]], 1)
eq_(row._mapping[t2.c["b"]], 1)
row = connection.execute(t3.select()).first()
eq_(row._mapping[t3.c["\u6e2c\u8a66_id"]], 1)
eq_(row._mapping[t3.c["unitable1_\u6e2c\u8a66"]], 5)
eq_(row._mapping[t3.c["Unitéble2_b"]], 1)
eq_(row._mapping[t3.c["\u6e2c\u8a66_self"]], 1)
def test_reflect(self, connection):
connection.execute(t1.insert(), {"méil": 2, "\u6e2c\u8a66": 7})
connection.execute(t2.insert(), {"a": 2, "b": 2})
connection.execute(
t3.insert(),
{
"\u6e2c\u8a66_id": 2,
"unitable1_\u6e2c\u8a66": 7,
"Unitéble2_b": 2,
"\u6e2c\u8a66_self": 2,
},
)
meta = MetaData()
tt1 = Table(t1.name, meta, autoload_with=connection)
tt2 = Table(t2.name, meta, autoload_with=connection)
tt3 = Table(t3.name, meta, autoload_with=connection)
connection.execute(tt1.insert(), {"méil": 1, "\u6e2c\u8a66": 5})
connection.execute(tt2.insert(), {"méil": 1, "\u6e2c\u8a66": 1})
connection.execute(
tt3.insert(),
{
"\u6e2c\u8a66_id": 1,
"unitable1_\u6e2c\u8a66": 5,
"Unitéble2_b": 1,
"\u6e2c\u8a66_self": 1,
},
)
eq_(
connection.execute(tt1.select().order_by(desc("méil"))).fetchall(),
[(2, 7), (1, 5)],
)
eq_(
connection.execute(tt2.select().order_by(desc("méil"))).fetchall(),
[(2, 2), (1, 1)],
)
eq_(
connection.execute(
tt3.select().order_by(desc("\u6e2c\u8a66_id"))
).fetchall(),
[(2, 7, 2, 2), (1, 5, 1, 1)],
)
def test_repr(self):
meta = MetaData()
t = Table("\u6e2c\u8a66", meta, Column("\u6e2c\u8a66_id", Integer))
eq_(
repr(t),
(
"Table('測試', MetaData(), "
"Column('測試_id', Integer(), "
"table=<測試>), "
"schema=None)"
),
)
| UnicodeSchemaTest |
python | allegroai__clearml | clearml/backend_api/services/v2_13/models.py | {
"start": 36275,
"end": 37806
} | class ____(Response):
"""
Response of models.create endpoint.
:param id: ID of the model
:type id: str
:param created: Was the model created
:type created: bool
"""
_service = "models"
_action = "create"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"created": {
"description": "Was the model created",
"type": ["boolean", "null"],
},
"id": {"description": "ID of the model", "type": ["string", "null"]},
},
"type": "object",
}
def __init__(self, id: Optional[str] = None, created: Optional[bool] = None, **kwargs: Any) -> None:
super(CreateResponse, self).__init__(**kwargs)
self.id = id
self.created = created
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("created")
def created(self) -> Optional[bool]:
return self._property_created
@created.setter
def created(self, value: Optional[bool]) -> None:
if value is None:
self._property_created = None
return
self.assert_isinstance(value, "created", (bool,))
self._property_created = value
| CreateResponse |
python | django__django | tests/auth_tests/test_signals.py | {
"start": 396,
"end": 4892
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create_user(username="testclient", password="password")
cls.u3 = User.objects.create_user(username="staff", password="password")
def listener_login(self, user, **kwargs):
self.logged_in.append(user)
def listener_logout(self, user, **kwargs):
self.logged_out.append(user)
def listener_login_failed(self, sender, **kwargs):
self.login_failed.append(kwargs)
def setUp(self):
"""Set up the listeners and reset the logged in/logged out counters"""
self.logged_in = []
self.logged_out = []
self.login_failed = []
signals.user_logged_in.connect(self.listener_login)
self.addCleanup(signals.user_logged_in.disconnect, self.listener_login)
signals.user_logged_out.connect(self.listener_logout)
self.addCleanup(signals.user_logged_out.disconnect, self.listener_logout)
signals.user_login_failed.connect(self.listener_login_failed)
self.addCleanup(
signals.user_login_failed.disconnect, self.listener_login_failed
)
def test_login(self):
# Only a successful login will trigger the success signal.
self.client.login(username="testclient", password="bad")
self.assertEqual(len(self.logged_in), 0)
self.assertEqual(len(self.login_failed), 1)
self.assertEqual(self.login_failed[0]["credentials"]["username"], "testclient")
# verify the password is cleansed
self.assertIn("***", self.login_failed[0]["credentials"]["password"])
self.assertIn("request", self.login_failed[0])
# Like this:
self.client.login(username="testclient", password="password")
self.assertEqual(len(self.logged_in), 1)
self.assertEqual(self.logged_in[0].username, "testclient")
# Ensure there were no more failures.
self.assertEqual(len(self.login_failed), 1)
def test_logout_anonymous(self):
# The log_out function will still trigger the signal for anonymous
# users.
self.client.post("/logout/next_page/")
self.assertEqual(len(self.logged_out), 1)
self.assertIsNone(self.logged_out[0])
def test_logout(self):
self.client.login(username="testclient", password="password")
self.client.post("/logout/next_page/")
self.assertEqual(len(self.logged_out), 1)
self.assertEqual(self.logged_out[0].username, "testclient")
def test_update_last_login(self):
"""Only `last_login` is updated in `update_last_login`"""
user = self.u3
old_last_login = user.last_login
user.username = "This username shouldn't get saved"
request = RequestFactory().get("/login")
signals.user_logged_in.send(sender=user.__class__, request=request, user=user)
user = User.objects.get(pk=user.pk)
self.assertEqual(user.username, "staff")
self.assertNotEqual(user.last_login, old_last_login)
def test_failed_login_without_request(self):
authenticate(username="testclient", password="bad")
self.assertIsNone(self.login_failed[0]["request"])
def test_login_with_custom_user_without_last_login_field(self):
"""
The user_logged_in signal is only registered if the user model has a
last_login field.
"""
last_login_receivers = signals.user_logged_in.receivers
try:
signals.user_logged_in.receivers = []
with self.assertRaises(FieldDoesNotExist):
MinimalUser._meta.get_field("last_login")
with self.settings(AUTH_USER_MODEL="auth_tests.MinimalUser"):
apps.get_app_config("auth").ready()
self.assertEqual(signals.user_logged_in.receivers, [])
# last_login is a property whose value is None.
self.assertIsNone(UserWithDisabledLastLoginField().last_login)
with self.settings(
AUTH_USER_MODEL="auth_tests.UserWithDisabledLastLoginField"
):
apps.get_app_config("auth").ready()
self.assertEqual(signals.user_logged_in.receivers, [])
with self.settings(AUTH_USER_MODEL="auth.User"):
apps.get_app_config("auth").ready()
self.assertEqual(len(signals.user_logged_in.receivers), 1)
finally:
signals.user_logged_in.receivers = last_login_receivers
| SignalTestCase |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassPostInit1.py | {
"start": 199,
"end": 363
} | class ____:
a: InitVar[int]
b: InitVar[str]
c: InitVar[bool]
def __post_init__(self, x: float, y: str, z: int, xx: int = 3) -> None: ...
@dataclass
| A |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/config_types.py | {
"start": 13710,
"end": 15425
} | class ____(graphene.ObjectType):
fields = non_null_list(GrapheneConfigTypeField)
class Meta:
interfaces = (GrapheneConfigType,)
name = "CompositeConfigType"
def __init__(
self,
get_config_type: Callable[[str], ConfigTypeSnap],
config_type_snap: ConfigTypeSnap,
):
self._config_type_snap = check.inst_param(
config_type_snap, "config_type_snap", ConfigTypeSnap
)
self._get_config_type = get_config_type
super().__init__(**_ctor_kwargs_for_snap(config_type_snap))
def resolve_recursive_config_types(
self, graphene_info: ResolveInfo
) -> list[GrapheneConfigTypeUnion]:
return [
to_config_type(self._get_config_type, config_type_key)
for config_type_key in _recursive_config_type_keys(
self._get_config_type, self._config_type_snap
)
]
def resolve_fields(self, graphene_info: ResolveInfo) -> list[GrapheneConfigTypeField]:
return sorted(
[
GrapheneConfigTypeField(
get_config_type=self._get_config_type,
field_snap=field_snap,
)
for field_snap in (self._config_type_snap.fields or [])
],
key=lambda field: field.name,
)
types = [
GrapheneArrayConfigType,
GrapheneCompositeConfigType,
GrapheneConfigType,
GrapheneConfigTypeField,
GrapheneEnumConfigType,
GrapheneEnumConfigValue,
GrapheneNullableConfigType,
GrapheneRegularConfigType,
GrapheneScalarUnionConfigType,
GrapheneWrappingConfigType,
GrapheneMapConfigType,
]
| GrapheneCompositeConfigType |
python | jazzband__prettytable | tests/test_prettytable.py | {
"start": 45562,
"end": 47219
} | class ____:
row = [
"bluedevil breeze breeze-gtk eos-bash-shared glib2 "
"kactivitymanagerd kde-cli-tools kde-gtk-config kdecoration"
]
EXPECTED_TRUE = """+------------------------------------------+
| Field 1 |
+------------------------------------------+
| bluedevil breeze breeze-gtk eos-bash- |
| shared glib2 kactivitymanagerd kde-cli- |
| tools kde-gtk-config kdecoration |
+------------------------------------------+"""
EXPECTED_FALSE = """+------------------------------------------+
| Field 1 |
+------------------------------------------+
| bluedevil breeze breeze-gtk |
| eos-bash-shared glib2 kactivitymanagerd |
| kde-cli-tools kde-gtk-config kdecoration |
+------------------------------------------+"""
def test_break_on_hyphens(self) -> None:
table = PrettyTable(max_width=40)
table.break_on_hyphens = False
assert not table.break_on_hyphens
table.add_row(self.row)
assert table.get_string().strip() == self.EXPECTED_FALSE
def test_break_on_hyphens_on_init(self) -> None:
table = PrettyTable(max_width=40, break_on_hyphens=False)
assert not table._break_on_hyphens
assert not table.break_on_hyphens
table.add_row(self.row)
assert table.get_string().strip() == self.EXPECTED_FALSE
def test_break_on_hyphens_default(self) -> None:
table = PrettyTable(max_width=40)
assert table.break_on_hyphens
table.add_row(self.row)
assert table.get_string().strip() == self.EXPECTED_TRUE
| TestBreakOnHyphens |
python | great-expectations__great_expectations | great_expectations/metrics/metric.py | {
"start": 1316,
"end": 1487
} | class ____(ValueError):
def __init__(self, param_name) -> None:
super().__init__("{param_name} must be a non-empty string.")
@dataclass_transform()
| EmptyStrError |
python | astropy__astropy | astropy/samp/__init__.py | {
"start": 619,
"end": 1039
} | class ____(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.samp`.
"""
use_internet = _config.ConfigItem(
True,
"Whether to allow `astropy.samp` to use the internet, if available.",
aliases=["astropy.samp.utils.use_internet"],
)
n_retries = _config.ConfigItem(
10, "How many times to retry communications when they fail"
)
conf = Conf()
| Conf |
python | gevent__gevent | src/greentest/3.13/test_socket.py | {
"start": 144166,
"end": 160361
} | class ____(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(is_apple, "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(is_apple, "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(is_apple, "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
num_fds = 2
self.checkRecvmsgFDs(num_fds,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT * num_fds)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(is_apple, "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
@unittest.skipIf(is_apple, "skipping, see issue #12958")
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@skipForRefleakHuntinIf(sys.platform == "darwin", "#80931")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
@skipForRefleakHuntinIf(sys.platform == "darwin", "#80931")
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTrunc0.client_skip
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@skipForRefleakHuntinIf(sys.platform == "darwin", "#80931")
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
@testCmsgTrunc1.client_skip
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
@skipForRefleakHuntinIf(sys.platform == "darwin", "#80931")
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
@testCmsgTrunc2Int.client_skip
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
@skipForRefleakHuntinIf(sys.platform == "darwin", "#80931")
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
@testCmsgTruncLen0Minus1.client_skip
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
with downgrade_malformed_data_warning(): # TODO: gh-110012
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
@skipForRefleakHuntinIf(sys.platform == "darwin", "#80931")
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
@testCmsgTruncLen0.client_skip
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
@skipForRefleakHuntinIf(sys.platform == "darwin", "#80931")
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
@testCmsgTruncLen0Plus1.client_skip
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
@skipForRefleakHuntinIf(sys.platform == "darwin", "#80931")
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
@testCmsgTruncLen1.client_skip
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
@skipForRefleakHuntinIf(sys.platform == "darwin", "#80931")
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
@testCmsgTruncLen2Minus1.client_skip
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
| SCMRightsTest |
python | PyCQA__pylint | tests/functional/m/membership_protocol.py | {
"start": 1833,
"end": 2148
} | class ____:
valid_values = None
def validate(self, value):
if self.valid_values is None:
return True
else:
# error should not be emitted here
return value in self.valid_values
# class is not named as abstract
# but still is deduceably abstract
| AbstractThing |
python | allegroai__clearml | clearml/storage/helper.py | {
"start": 17527,
"end": 34971
} | class ____(_Driver):
"""Boto3 storage adapter (simple, enough for now)"""
_min_pool_connections = 512
_max_multipart_concurrency = deferred_config("aws.boto3.max_multipart_concurrency", 16)
_multipart_threshold = deferred_config("aws.boto3.multipart_threshold", (1024**2) * 8) # 8 MB
_multipart_chunksize = deferred_config("aws.boto3.multipart_chunksize", (1024**2) * 8)
_pool_connections = deferred_config("aws.boto3.pool_connections", 512)
_connect_timeout = deferred_config("aws.boto3.connect_timeout", 60)
_read_timeout = deferred_config("aws.boto3.read_timeout", 60)
_signature_version = deferred_config("aws.boto3.signature_version", None)
_stream_download_pool_connections = deferred_config("aws.boto3.stream_connections", 128)
_stream_download_pool = None
_stream_download_pool_pid = None
_containers = {}
scheme = "s3"
scheme_prefix = str(furl(scheme=scheme, netloc=""))
_bucket_location_failure_reported = set()
class _Container(object):
_creation_lock = ForkSafeRLock()
def __init__(self, name: str, cfg: S3BucketConfig) -> None:
try:
import boto3
except ImportError:
raise UsageError(
"AWS S3 storage driver (boto3) not found. " 'Please install driver using: pip install "clearml[s3]"'
)
# skip 's3://'
self.name = name[5:]
# boto3 client creation isn't thread-safe (client itself is)
with self._creation_lock:
boto_kwargs = _Boto3Driver._get_boto_resource_kwargs_from_config(cfg)
boto_session = boto3.Session(
profile_name=cfg.profile or None,
)
self.resource = boto_session.resource("s3", **boto_kwargs)
self.config = cfg
bucket_name = self.name[len(cfg.host) + 1 :] if cfg.host else self.name
self.bucket = self.resource.Bucket(bucket_name)
@attrs
class ListResult(object):
name = attrib(default=None)
size = attrib(default=None)
def __init__(self) -> None:
pass
def _get_stream_download_pool(self) -> ThreadPoolExecutor:
if self._stream_download_pool is None or self._stream_download_pool_pid != os.getpid():
self._stream_download_pool_pid = os.getpid()
self._stream_download_pool = ThreadPoolExecutor(max_workers=int(self._stream_download_pool_connections))
return self._stream_download_pool
@classmethod
def _get_boto_resource_kwargs_from_config(cls, cfg: S3BucketConfig) -> Dict[str, Any]:
try:
import botocore.client
except ImportError:
raise UsageError(
"AWS S3 storage driver (boto3) not found. " 'Please install driver using: pip install "clearml[s3]"'
)
endpoint = (("https://" if cfg.secure else "http://") + cfg.host) if cfg.host else None
verify = cfg.verify
if verify is True:
# True is a non-documented value for boto3, use None instead (which means verify)
verify = None
elif isinstance(verify, str) and not os.path.exists(verify) and verify.split("://")[0] in driver_schemes:
verify = _Boto3Driver.download_cert(verify)
boto_kwargs = {
"endpoint_url": endpoint,
"use_ssl": cfg.secure,
"verify": verify,
"region_name": cfg.region or None, # None in case cfg.region is an empty string
"config": botocore.client.Config(
max_pool_connections=max(
int(_Boto3Driver._min_pool_connections),
int(_Boto3Driver._pool_connections),
),
connect_timeout=int(_Boto3Driver._connect_timeout),
read_timeout=int(_Boto3Driver._read_timeout),
signature_version=_Boto3Driver._signature_version,
),
}
if not cfg.use_credentials_chain:
boto_kwargs["aws_access_key_id"] = cfg.key or None
boto_kwargs["aws_secret_access_key"] = cfg.secret or None
if cfg.token:
boto_kwargs["aws_session_token"] = cfg.token
return boto_kwargs
def get_container(
self,
container_name: str,
config: Optional[Any] = None,
**kwargs: Any,
) -> _Container:
if container_name not in self._containers:
self._containers[container_name] = self._Container(name=container_name, cfg=config)
self._containers[container_name].config.retries = kwargs.get("retries", 5)
return self._containers[container_name]
def upload_object_via_stream(
self,
iterator: Any,
container: Any,
object_name: str,
callback: Any = None,
extra: dict = None,
**kwargs: Any,
) -> bool:
import boto3.s3.transfer
stream = _Stream(iterator)
extra_args = {}
try:
extra_args = {"ContentType": get_file_mimetype(object_name)}
extra_args.update(container.config.extra_args or {})
container.bucket.upload_fileobj(
stream,
object_name,
Config=boto3.s3.transfer.TransferConfig(
use_threads=container.config.multipart,
max_concurrency=int(self._max_multipart_concurrency) if container.config.multipart else 1,
num_download_attempts=container.config.retries,
multipart_threshold=int(self._multipart_threshold),
multipart_chunksize=int(self._multipart_chunksize),
),
Callback=callback,
ExtraArgs=extra_args,
)
except RuntimeError:
# one might get an error similar to: "RuntimeError: cannot schedule new futures after interpreter shutdown"
# In this case, retry the upload without threads
try:
container.bucket.upload_fileobj(
stream,
object_name,
Config=boto3.s3.transfer.TransferConfig(
use_threads=False,
num_download_attempts=container.config.retries,
multipart_threshold=int(self._multipart_threshold),
multipart_chunksize=int(self._multipart_chunksize),
),
Callback=callback,
ExtraArgs=extra_args,
)
except Exception as ex:
self.get_logger().error("Failed uploading: %s" % ex)
return False
except Exception as ex:
self.get_logger().error("Failed uploading: %s" % ex)
return False
return True
def upload_object(
self,
file_path: str,
container: Any,
object_name: str,
callback: Any = None,
extra: dict = None,
**kwargs: Any,
) -> bool:
import boto3.s3.transfer
extra_args = {}
try:
extra_args = {"ContentType": get_file_mimetype(object_name or file_path)}
extra_args.update(container.config.extra_args or {})
container.bucket.upload_file(
file_path,
object_name,
Config=boto3.s3.transfer.TransferConfig(
use_threads=container.config.multipart,
max_concurrency=int(self._max_multipart_concurrency) if container.config.multipart else 1,
num_download_attempts=container.config.retries,
multipart_threshold=int(self._multipart_threshold),
multipart_chunksize=int(self._multipart_chunksize),
),
Callback=callback,
ExtraArgs=extra_args,
)
except RuntimeError:
# one might get an error similar to: "RuntimeError: cannot schedule new futures after interpreter shutdown"
# In this case, retry the upload without threads
try:
container.bucket.upload_file(
file_path,
object_name,
Config=boto3.s3.transfer.TransferConfig(
use_threads=False,
num_download_attempts=container.config.retries,
multipart_threshold=int(self._multipart_threshold),
multipart_chunksize=int(self._multipart_chunksize),
),
Callback=callback,
ExtraArgs=extra_args,
)
except Exception as ex:
self.get_logger().error("Failed uploading: %s" % ex)
return False
except Exception as ex:
self.get_logger().error("Failed uploading: %s" % ex)
return False
return True
def list_container_objects(
self,
container: Any,
ex_prefix: Optional[str] = None,
**kwargs: Any,
) -> Generator[ListResult, None, None]:
if ex_prefix:
res = container.bucket.objects.filter(Prefix=ex_prefix)
else:
res = container.bucket.objects.all()
for res in res:
yield self.ListResult(name=res.key, size=res.size)
def delete_object(self, object: Any, **kwargs: Any) -> bool:
from botocore.exceptions import ClientError
object.delete()
try:
# Try loading the file to verify deletion
object.load()
return False
except ClientError as e:
return int(e.response["Error"]["Code"]) == 404
def get_object(
self,
container_name: str,
object_name: str,
*args: Any,
**kwargs: Any,
) -> Any:
full_container_name = "s3://" + container_name
container = self._containers[full_container_name]
obj = container.resource.Object(container.bucket.name, object_name)
obj.container_name = full_container_name
return obj
def download_object_as_stream(
self,
obj: Any,
chunk_size: int = 64 * 1024,
verbose: bool = None,
log: Any = None,
**_: Any,
) -> _Stream:
def async_download(a_obj: Any, a_stream: Any, cb: Any, cfg: Any) -> None:
try:
a_obj.download_fileobj(a_stream, Callback=cb, Config=cfg)
if cb:
cb.close(report_completed=True)
except Exception as ex:
if cb:
cb.close()
(log or self.get_logger()).error("Failed downloading: %s" % ex)
a_stream.close()
import boto3.s3.transfer
# return iterable object
stream = _Stream()
container = self._containers[obj.container_name]
config = boto3.s3.transfer.TransferConfig(
use_threads=container.config.multipart,
max_concurrency=int(self._max_multipart_concurrency) if container.config.multipart else 1,
num_download_attempts=container.config.retries,
multipart_threshold=int(self._multipart_threshold),
multipart_chunksize=int(self._multipart_chunksize),
)
total_size_mb = obj.content_length / (1024.0 * 1024.0)
remote_path = os.path.join(obj.container_name, obj.key)
cb = DownloadProgressReport(total_size_mb, verbose, remote_path, log)
self._get_stream_download_pool().submit(async_download, obj, stream, cb, config)
return stream
def download_object(
self,
obj: Any,
local_path: str,
overwrite_existing: bool = True,
delete_on_failure: bool = True,
callback: Any = None,
**_: Any,
) -> None:
import boto3.s3.transfer
p = Path(local_path)
if not overwrite_existing and p.is_file():
self.get_logger().warning("failed saving after download: overwrite=False and file exists (%s)" % str(p))
return
container = self._containers[obj.container_name]
Config = boto3.s3.transfer.TransferConfig(
use_threads=container.config.multipart,
max_concurrency=int(self._max_multipart_concurrency) if container.config.multipart else 1,
num_download_attempts=container.config.retries,
multipart_threshold=int(self._multipart_threshold),
multipart_chunksize=int(self._multipart_chunksize),
)
obj.download_file(str(p), Callback=callback, Config=Config)
@classmethod
def _test_bucket_config(
cls,
conf: S3BucketConfig,
log: logging.Logger,
test_path: str = "",
raise_on_error: bool = True,
log_on_error: bool = True,
) -> bool:
try:
import boto3
from botocore.exceptions import ClientError
except ImportError:
return False
if not conf.bucket:
return False
try:
if not conf.is_valid():
raise Exception("Missing credentials")
fullname = furl(conf.bucket).add(path=test_path).add(path="%s-upload_test" % cls.__module__)
bucket_name = str(fullname.path.segments[0])
filename = str(furl(path=fullname.path.segments[1:]))
if conf.subdir:
filename = "{}/{}".format(conf.subdir, filename)
data = {
"user": getpass.getuser(),
"machine": gethostname(),
"time": datetime.now(timezone.utc).isoformat(),
}
boto_session = boto3.Session(
profile_name=conf.profile or None,
)
boto_kwargs = _Boto3Driver._get_boto_resource_kwargs_from_config(conf)
boto_resource = boto_session.resource("s3", **boto_kwargs)
bucket = boto_resource.Bucket(bucket_name)
bucket.put_object(Key=filename, Body=json.dumps(data).encode("utf-8"))
region = cls._get_bucket_region(conf=conf, log=log, report_info=True)
if region and ((conf.region and region != conf.region) or (not conf.region and region != "us-east-1")):
msg = "incorrect region specified for bucket %s (detected region %s)" % (conf.bucket, region)
else:
return True
except ClientError as ex:
msg = ex.response["Error"]["Message"]
if log_on_error and log:
log.error(msg)
if raise_on_error:
raise
except Exception as ex:
msg = str(ex)
if log_on_error and log:
log.error(msg)
if raise_on_error:
raise
msg = ("Failed testing access to bucket %s: " % conf.bucket) + msg
if log_on_error and log:
log.error(msg)
if raise_on_error:
raise StorageError(msg)
return False
@classmethod
def _get_bucket_region(
cls,
conf: S3BucketConfigurations,
log: logging.Logger = None,
report_info: bool = False,
) -> str:
import boto3
from botocore.exceptions import ClientError
if not conf.bucket:
return None
def report(msg: str) -> None:
if log and conf.get_bucket_host() not in cls._bucket_location_failure_reported:
if report_info:
log.debug(msg)
else:
log.warning(msg)
cls._bucket_location_failure_reported.add(conf.get_bucket_host())
try:
boto_session = boto3.Session(
profile_name=conf.profile_name or None,
)
boto_kwargs = _Boto3Driver._get_boto_resource_kwargs_from_config(conf)
boto_kwargs.pop("region_name", None)
boto_resource = boto_session.resource("s3", **boto_kwargs)
return boto_resource.meta.client.get_bucket_location(Bucket=conf.bucket)["LocationConstraint"]
except ClientError as ex:
report(
"Failed getting bucket location (region) for bucket "
"%s: %s (%s, access_key=%s). Default region will be used. "
"This is normal if you do not have GET_BUCKET_LOCATION permission"
% (
conf.bucket,
ex.response["Error"]["Message"],
ex.response["Error"]["Code"],
conf.key,
)
)
except Exception as ex:
report(
"Failed getting bucket location (region) for bucket %s: %s. Default region will be used."
% (conf.bucket, str(ex))
)
return None
def get_direct_access(self, remote_path: str, **_: Any) -> None:
return None
def test_upload(self, test_path: str, config: Any, **_: Any) -> bool:
return True
def exists_file(self, container_name: str, object_name: str) -> bool:
obj = self.get_object(container_name, object_name)
# noinspection PyBroadException
try:
obj.load()
except Exception:
return False
return bool(obj)
| _Boto3Driver |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/repeat_test.py | {
"start": 6656,
"end": 9285
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(index=[-1, 6, 7])))
def testInvalidIndex(self, index):
dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3]).repeat(2)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(random_access.at(dataset, index=index))
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(index=[-1, 0])))
def testEmptyDataset(self, index):
dataset = dataset_ops.Dataset.from_tensor_slices([]).repeat(2)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(random_access.at(dataset, index=index))
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(elements=[0, 5, 10],
count=[0, 3, 8])))
def testFiniteRepeat(self, elements, count):
dataset = dataset_ops.Dataset.range(elements).repeat(count)
expected_dataset = np.tile(
np.arange(
start=0, stop=elements, step=1, dtype=dtypes.int64.as_numpy_dtype),
count)
for i in range(elements * count):
self.assertEqual(
self.evaluate(random_access.at(dataset, index=i)),
expected_dataset[i])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
elements=[0, 3, 5], count_1=[0, 1, 2], count_2=[3, 4, 5])))
def testRepeatRepeat(self, elements, count_1, count_2):
dataset = dataset_ops.Dataset.range(elements).repeat(count_1).repeat(
count_2)
expected_dataset = np.tile(
np.arange(
start=0, stop=elements, step=1, dtype=dtypes.int64.as_numpy_dtype),
count_1 * count_2)
for i in range(elements * count_1 * count_2):
self.assertEqual(
self.evaluate(random_access.at(dataset, index=i)),
expected_dataset[i])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(elements=[3, 5], count=[None, -1, -2])))
def testInfiniteRepeat(self, elements, count):
dataset = dataset_ops.Dataset.range(elements).repeat(count=count)
# Datasets with infinite cardinality do not support random access.
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(random_access.at(dataset, index=0))
| RepeatRandomAccessTest |
python | google__pytype | pytype/types/functions.py | {
"start": 2096,
"end": 2247
} | class ____:
"""A single function argument. Used in the matcher and for error handling."""
name: str
value: base.Variable
typ: base.BaseValue
| Arg |
python | getsentry__sentry | src/sentry/preprod/migrations/0002_drop_sentry_jsonfield.py | {
"start": 239,
"end": 1535
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("preprod", "0001_emerge_upload_models"),
]
operations = [
SafeRemoveField(
model_name="preprodartifact",
name="extras",
deletion_action=DeletionAction.MOVE_TO_PENDING,
),
]
| Migration |
python | pyca__cryptography | tests/hazmat/primitives/test_argon2.py | {
"start": 1341,
"end": 10924
} | class ____:
@pytest.fixture(scope="class", params=variants)
def clazz(self, request) -> type:
return request.param
@pytest.mark.parametrize(
"params", vectors, ids=lambda x: f"{x[0].__name__}-params"
)
def test_derive(self, params, backend):
argon_clazz, params = params
salt = binascii.unhexlify(params["salt"])
ad = binascii.unhexlify(params["ad"]) if "ad" in params else None
secret = (
binascii.unhexlify(params["secret"])
if "secret" in params
else None
)
length = int(params["length"])
iterations = int(params["iter"])
lanes = int(params["lanes"])
memory_cost = int(params["memcost"])
password = binascii.unhexlify(params["pass"])
derived_key = params["output"].lower()
variant = argon_clazz(
salt=salt,
length=length,
iterations=iterations,
lanes=lanes,
memory_cost=memory_cost,
ad=ad,
secret=secret,
)
assert binascii.hexlify(variant.derive(password)) == derived_key
def test_invalid_types(self, clazz, backend):
with pytest.raises(TypeError):
clazz(
salt="notbytes",
length=32,
iterations=1,
lanes=1,
memory_cost=32,
ad=None,
secret=None,
)
with pytest.raises(TypeError):
clazz(
salt=b"b" * 8,
length=32,
iterations=1,
lanes=1,
memory_cost=32,
ad="string",
secret=None,
)
with pytest.raises(TypeError):
clazz(
salt=b"b" * 8,
length=32,
iterations=1,
lanes=1,
memory_cost=32,
ad=None,
secret="string",
)
@pytest.mark.parametrize(
"params",
[
(b"b" * 7, 3, 1, 1, 32), # salt < 8
(b"b" * 8, 3, 1, 1, 32), # length < 4
(b"b" * 8, 32, 0, 1, 32), # iterations < 1
(b"b" * 8, 32, 1, 0, 32), # lanes < 1
(b"b" * 8, 32, 1, 1, 7), # memory_cost < 8 * lanes
(b"b" * 8, 32, 1, 32, 200), # memory_cost < 8 * lanes
],
)
def test_invalid_values(self, clazz, params, backend):
(salt, length, iterations, lanes, memory_cost) = params
with pytest.raises(ValueError):
clazz(
salt=salt,
length=length,
iterations=iterations,
lanes=lanes,
memory_cost=memory_cost,
)
def test_already_finalized(self, clazz, backend):
argon2id = clazz(
salt=b"salt" * 2, length=32, iterations=1, lanes=1, memory_cost=32
)
argon2id.derive(b"password")
with pytest.raises(AlreadyFinalized):
argon2id.derive(b"password")
def test_already_finalized_verify(self, clazz, backend):
argon2id = clazz(
salt=b"salt" * 2, length=32, iterations=1, lanes=1, memory_cost=32
)
digest = argon2id.derive(b"password")
with pytest.raises(AlreadyFinalized):
argon2id.verify(b"password", digest)
@pytest.mark.parametrize("digest", [b"invalidkey", b"0" * 32])
def test_invalid_verify(self, clazz, digest, backend):
argon2id = clazz(
salt=b"salt" * 2, length=32, iterations=1, lanes=1, memory_cost=32
)
with pytest.raises(InvalidKey):
argon2id.verify(b"password", digest)
def test_verify(self, clazz, backend):
argon2id = clazz(
salt=b"salt" * 2,
length=32,
iterations=1,
lanes=1,
memory_cost=32,
ad=None,
secret=None,
)
digest = argon2id.derive(b"password")
clazz(
salt=b"salt" * 2, length=32, iterations=1, lanes=1, memory_cost=32
).verify(b"password", digest)
def test_derive_into(self, clazz, backend):
argon2 = clazz(
salt=b"salt" * 2, length=32, iterations=1, lanes=1, memory_cost=32
)
buf = bytearray(32)
n = argon2.derive_into(b"password", buf)
assert n == 32
# Verify the output matches what derive would produce
argon2_2 = clazz(
salt=b"salt" * 2, length=32, iterations=1, lanes=1, memory_cost=32
)
expected = argon2_2.derive(b"password")
assert buf == expected
@pytest.mark.parametrize(
("buflen", "outlen"), [(31, 32), (33, 32), (16, 32), (64, 32)]
)
def test_derive_into_buffer_incorrect_size(
self, clazz, buflen, outlen, backend
):
argon2 = clazz(
salt=b"salt" * 2,
length=outlen,
iterations=1,
lanes=1,
memory_cost=32,
)
buf = bytearray(buflen)
with pytest.raises(ValueError, match="buffer must be"):
argon2.derive_into(b"password", buf)
def test_derive_into_already_finalized(self, clazz, backend):
argon2 = clazz(
salt=b"salt" * 2, length=32, iterations=1, lanes=1, memory_cost=32
)
buf = bytearray(32)
argon2.derive_into(b"password", buf)
with pytest.raises(AlreadyFinalized):
argon2.derive_into(b"password2", buf)
def test_derive_phc_encoded(self, backend):
# Test that we can generate a PHC formatted string
argon2id = Argon2id(
salt=b"0" * 8,
length=32,
iterations=2,
lanes=2,
memory_cost=64,
)
encoded = argon2id.derive_phc_encoded(b"password")
# Verify the general format is correct
assert encoded == (
"$argon2id$v=19$m=64,t=2,p=2$"
"MDAwMDAwMDA$"
"jFn1qYAgmfVKFWVeUGQcVK4d8RSiQJFTS7R7VII+fRk"
)
def test_verify_phc_encoded(self, clazz):
# First generate a PHC string
argon2 = clazz(
salt=b"0" * 8,
length=32,
iterations=1,
lanes=1,
memory_cost=32,
)
encoded = argon2.derive_phc_encoded(b"password")
clazz.verify_phc_encoded(b"password", encoded)
clazz(
salt=b"0" * 8,
length=32,
iterations=1,
lanes=1,
memory_cost=32,
).verify(b"password", base64.b64decode(encoded.split("$")[-1] + "="))
with pytest.raises(InvalidKey):
clazz.verify_phc_encoded(b"wrong_password", encoded)
def test_verify_phc_vector(self):
# From https://github.com/P-H-C/phc-string-format/blob/master/phc-sf-spec.md#example
Argon2id.verify_phc_encoded(
b"hunter2",
"$argon2id$v=19$m=65536,t=2,p=1$gZiV/M1gPc22ElAH/Jh1Hw$CWOrkoo7oJBQ/iyh7uJ0LO2aLEfrHwTWllSAxT0zRno",
secret=b"pepper",
)
def test_verify_phc_encoded_invalid_format(self, clazz):
# Totally invalid string
with pytest.raises(InvalidKey):
clazz.verify_phc_encoded(b"password", "not-a-valid-format")
# Invalid algorithm
with pytest.raises(InvalidKey):
clazz.verify_phc_encoded(
b"password", "$krypton7$v=19$m=32,t=1,p=1$c2FsdHNhbHQ$hash"
)
# Incorrect variant specified, offer a more helpful error message
wrong_variant = "argon2id" if clazz is not Argon2id else "argon2d"
with pytest.raises(InvalidKey, match="did you mean to use"):
clazz.verify_phc_encoded(
b"password",
f"${wrong_variant}$v=19$m=32,t=1,p=1$c2FsdHNhbHQ$!invalid!",
)
variant = clazz.__name__.lower()
# Invalid version
with pytest.raises(InvalidKey):
clazz.verify_phc_encoded(
b"password", f"${variant}$v=18$m=32,t=1,p=1$c2FsdHNhbHQ$hash"
)
# Missing parameters
with pytest.raises(InvalidKey):
clazz.verify_phc_encoded(
b"password", f"${variant}$v=19$m=32,t=1$c2FsdHNhbHQ$hash"
)
# Parameters in wrong order
with pytest.raises(InvalidKey):
clazz.verify_phc_encoded(
b"password", f"${variant}$v=19$t=1,m=32,p=1$c2FsdHNhbHQ$hash"
)
# Invalid memory cost
with pytest.raises(InvalidKey):
clazz.verify_phc_encoded(
b"password", f"${variant}$v=19$m=abc,t=1,p=1$!invalid!$hash"
)
# Invalid iterations
with pytest.raises(InvalidKey):
clazz.verify_phc_encoded(
b"password", f"${variant}$v=19$m=32,t=abc,p=1$!invalid!$hash"
)
# Invalid lanes
with pytest.raises(InvalidKey):
clazz.verify_phc_encoded(
b"password", f"${variant}$v=19$m=32,t=1,p=abc$!invalid!$hash"
)
# Invalid base64 in salt
with pytest.raises(InvalidKey):
clazz.verify_phc_encoded(
b"password", f"${variant}$v=19$m=32,t=1,p=1$!invalid!$hash"
)
# Invalid base64 in hash
with pytest.raises(InvalidKey):
clazz.verify_phc_encoded(
b"password",
f"${variant}$v=19$m=32,t=1,p=1$c2FsdHNhbHQ$!invalid!",
)
| TestArgon2 |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_lookup.py | {
"start": 13943,
"end": 16538
} | class ____:
def __init__(self, arg: int):
pass
@given(st.from_type(UnknownAnnotatedType))
def test_builds_for_unknown_annotated_type(ex):
assert isinstance(ex, UnknownAnnotatedType)
def unknown_annotated_func(a: UnknownType, b=2, *, c: UnknownType, d=4):
pass
def test_raises_for_arg_with_unresolvable_annotation():
with pytest.raises(ResolutionFailed):
check_can_generate_examples(st.builds(unknown_annotated_func))
with pytest.raises(ResolutionFailed):
check_can_generate_examples(
st.builds(unknown_annotated_func, a=st.none(), c=...)
)
@given(a=..., b=...)
def test_can_use_type_hints(a: int, b: float):
assert isinstance(a, int)
assert isinstance(b, float)
def test_error_if_has_unresolvable_hints():
@given(a=...)
def inner(a: UnknownType):
pass
with pytest.raises(InvalidArgument):
inner()
def test_resolves_NewType():
typ = typing.NewType("T", int)
nested = typing.NewType("NestedT", typ)
uni = typing.NewType("UnionT", int | None)
assert_simple_property(from_type(typ), lambda x: isinstance(x, int))
assert_simple_property(from_type(nested), lambda x: isinstance(x, int))
assert_simple_property(from_type(uni), lambda x: isinstance(x, (int, type(None))))
find_any(from_type(uni), lambda x: isinstance(x, int))
find_any(from_type(uni), lambda x: isinstance(x, type(None)))
@pytest.mark.parametrize("is_handled", [True, False])
def test_resolves_NewType_conditionally(is_handled):
sentinel = object()
typ = typing.NewType("T", int)
def resolve_custom_strategy(thing):
assert thing is typ
if is_handled:
return st.just(sentinel)
return NotImplemented
with temp_registered(typ, resolve_custom_strategy):
if is_handled:
assert_simple_property(st.from_type(typ), lambda x: x is sentinel)
else:
assert_simple_property(st.from_type(typ), lambda x: isinstance(x, int))
E = enum.Enum("E", "a b c")
@given(from_type(E))
def test_resolves_enum(ex):
assert isinstance(ex, E)
@pytest.mark.parametrize("resolver", [from_type, st.sampled_from])
def test_resolves_flag_enum(resolver):
# Storing all combinations takes O(2^n) memory. Using an enum of 52
# members in this test ensures that we won't try!
F = enum.Flag("F", " ".join(string.ascii_letters))
# Checks for combination coverage are found in nocover/test_sampled_from
@given(resolver(F))
def inner(ex):
assert isinstance(ex, F)
inner()
| UnknownAnnotatedType |
python | django__django | tests/auth_tests/test_middleware.py | {
"start": 461,
"end": 3207
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.user = User.objects.create_user(
"test_user", "test@example.com", "test_password"
)
cls.user2 = User.objects.create_user(
"test_user2", "test2@example.com", "test_password2"
)
def setUp(self):
self.middleware = AuthenticationMiddleware(lambda req: HttpResponse())
self.client.force_login(self.user)
self.request = HttpRequest()
self.request.session = self.client.session
def test_no_password_change_doesnt_invalidate_session(self):
self.request.session = self.client.session
self.middleware(self.request)
self.assertIsNotNone(self.request.user)
self.assertFalse(self.request.user.is_anonymous)
def test_changed_password_invalidates_session(self):
# After password change, user should be anonymous
self.user.set_password("new_password")
self.user.save()
self.middleware(self.request)
self.assertIsNotNone(self.request.user)
self.assertTrue(self.request.user.is_anonymous)
# session should be flushed
self.assertIsNone(self.request.session.session_key)
def test_no_session(self):
msg = (
"The Django authentication middleware requires session middleware "
"to be installed. Edit your MIDDLEWARE setting to insert "
"'django.contrib.sessions.middleware.SessionMiddleware' before "
"'django.contrib.auth.middleware.AuthenticationMiddleware'."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.middleware(HttpRequest())
async def test_auser(self):
self.middleware(self.request)
auser = await self.request.auser()
self.assertEqual(auser, self.user)
auser_second = await self.request.auser()
self.assertIs(auser, auser_second)
async def test_auser_after_alogin(self):
self.middleware(self.request)
auser = await self.request.auser()
self.assertEqual(auser, self.user)
await alogin(self.request, self.user2)
auser_second = await self.request.auser()
self.assertEqual(auser_second, self.user2)
async def test_auser_after_alogout(self):
self.middleware(self.request)
auser = await self.request.auser()
self.assertEqual(auser, self.user)
await alogout(self.request)
auser_second = await self.request.auser()
self.assertTrue(auser_second.is_anonymous)
@override_settings(ROOT_URLCONF="auth_tests.urls")
@modify_settings(
MIDDLEWARE={"append": "django.contrib.auth.middleware.LoginRequiredMiddleware"}
)
| TestAuthenticationMiddleware |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_name/invalid_name_property.py | {
"start": 573,
"end": 775
} | class ____:
"""Test property setter for pattern set in attr-rgx."""
@property
def foo(self): # [invalid-name]
pass
@foo.setter
def FOOSETTER(self):
pass
| AnotherFooClass |
python | doocs__leetcode | solution/1000-1099/1039.Minimum Score Triangulation of Polygon/Solution2.py | {
"start": 0,
"end": 412
} | class ____:
def minScoreTriangulation(self, values: List[int]) -> int:
n = len(values)
f = [[0] * n for _ in range(n)]
for i in range(n - 3, -1, -1):
for j in range(i + 2, n):
f[i][j] = min(
f[i][k] + f[k][j] + values[i] * values[k] * values[j]
for k in range(i + 1, j)
)
return f[0][-1]
| Solution |
python | kamyu104__LeetCode-Solutions | Python/groups-of-special-equivalent-strings.py | {
"start": 33,
"end": 411
} | class ____(object):
def numSpecialEquivGroups(self, A):
"""
:type A: List[str]
:rtype: int
"""
def count(word):
result = [0]*52
for i, letter in enumerate(word):
result[ord(letter)-ord('a') + 26*(i%2)] += 1
return tuple(result)
return len({count(word) for word in A})
| Solution |
python | patrick-kidger__equinox | equinox/internal/_closure_to_pytree.py | {
"start": 2020,
"end": 3980
} | class ____(Module):
fn: _FunctionWithEquality
contents: tuple[Any, ...] | None
def __init__(self, fn: types.FunctionType):
self.fn = _FunctionWithEquality(fn)
if fn.__closure__ is None:
contents = None
else:
contents = tuple(
closure_to_pytree(cell.cell_contents) for cell in fn.__closure__
)
self.contents = contents
def __call__(self, *args, **kwargs):
if self.contents is None:
closure = None
else:
closure = tuple(_make_cell(contents) for contents in self.contents)
fn = _adjust_function_closure(self.fn.fn, closure)
return fn(*args, **kwargs)
def _fixup_closure(leaf):
if isinstance(leaf, types.FunctionType):
return _Closure(leaf)
else:
return leaf
def closure_to_pytree(tree):
"""Convert all function closures into pytree nodes.
**Arguments:**
- `tree`: Any pytree.
**Returns:**
A copy of `tree`, where all function closures have been replaced by a new object
that is (a) callable like the original function, but (b) iterates over its
`__closure__` as subnodes in the pytree.
!!! Example
```python
def some_fn():
a = jnp.array(1.)
@closure_to_pytree
def f(x):
return x + a
print(jax.tree_util.tree_leaves(f)) # prints out `a`
```
!!! Warning
One annoying technical detail in the above example: we had to wrap the whole lot
in a `some_fn`, so that we're in a local scope. Python treats functions at the
global scope differently, and this conversion won't result in any global
variable being treated as part of the pytree.
In practice, the intended use case of this function is to fix Optax, which
always uses local functions.
"""
return jtu.tree_map(_fixup_closure, tree)
| _Closure |
python | vyperlang__vyper | vyper/venom/passes/concretize_mem_loc.py | {
"start": 3819,
"end": 9730
} | class ____:
function: IRFunction
cfg: CFGAnalysis
mem_allocator: MemoryAllocator
liveat: dict[IRInstruction, OrderedSet[IRAbstractMemLoc]]
livesets: dict[IRAbstractMemLoc, OrderedSet[IRInstruction]]
used: dict[IRInstruction, OrderedSet[IRAbstractMemLoc]]
def __init__(
self,
function: IRFunction,
cfg: CFGAnalysis,
dfg: DFGAnalysis,
mem_allocator: MemoryAllocator,
):
self.function = function
self.cfg = cfg
self.dfg = dfg
self.used = defaultdict(OrderedSet)
self.liveat = defaultdict(OrderedSet)
self.mem_allocator = mem_allocator
def analyze(self):
upper_bound = self.function.num_basic_blocks**2 + 1
for _ in range(upper_bound):
change = False
# these parts of analysis are better (performance)
# in different orders so it is split into
# different loops
for bb in self.cfg.dfs_post_walk:
change |= self._handle_liveat(bb)
for bb in self.cfg.dfs_pre_walk:
change |= self._handle_used(bb)
if not change:
break
else:
raise CompilerPanic("Uppper bound in memory liveness reached")
self.livesets = defaultdict(OrderedSet)
for inst, mems in self.liveat.items():
for mem in mems:
if mem in self.used[inst]:
self.livesets[mem].add(inst)
def _handle_liveat(self, bb: IRBasicBlock) -> bool:
live: OrderedSet[IRAbstractMemLoc] = OrderedSet()
if len(succs := self.cfg.cfg_out(bb)) > 0:
for other in (self.liveat[succ.instructions[0]] for succ in succs):
live.update(other)
before = self.liveat[bb.instructions[0]]
for inst in reversed(bb.instructions):
write_op = get_memory_write_op(inst)
write_ops = self._find_base_ptrs(write_op)
read_op = get_memory_read_op(inst)
read_ops = self._find_base_ptrs(read_op)
for read_op in read_ops:
assert isinstance(read_op, IRAbstractMemLoc)
live.add(read_op.without_offset())
if inst.opcode == "invoke":
label = inst.operands[0]
assert isinstance(label, IRLabel)
fn = self.function.ctx.get_function(label)
# this lets us deallocate internal
# function memory after it's dead
live.addmany(self.mem_allocator.mems_used[fn])
for op in inst.operands:
if isinstance(op, IRAbstractMemLoc):
# this case is for any buffers which are
# passed to invoke as a stack parameter.
live.add(op.without_offset())
self.liveat[inst] = live.copy()
for write_op in write_ops:
assert isinstance(write_op, IRAbstractMemLoc)
size = get_write_size(inst)
assert size is not None
if not isinstance(size, IRLiteral):
# if the size is not a literal then we do not handle it
continue
if write_op in live and size.value == write_op.size:
# if the memory segment is overriden completely
# we dont have to consider the memory location
# before this point live, since any values that
# are currently in there will be overriden either way
live.remove(write_op.without_offset())
if write_op._id in (op._id for op in read_ops):
# the instruction reads and writes from the same memory
# location, we cannot remove it from the liveset
live.add(write_op.without_offset())
if before != self.liveat[bb.instructions[0]]:
return True
return False
def _handle_used(self, bb: IRBasicBlock) -> bool:
# this is to get positions where the memory location
# are used/already used so we dont allocate
# memory before the place where it is firstly used
curr: OrderedSet[IRAbstractMemLoc] = OrderedSet(self.function.allocated_args.values())
if len(preds := self.cfg.cfg_in(bb)) > 0:
for other in (self.used[pred.instructions[-1]] for pred in preds):
curr.update(other)
before = self.used[bb.instructions[-1]]
for inst in bb.instructions:
for op in inst.operands:
if not isinstance(op, IRAbstractMemLoc):
continue
curr.add(op.without_offset())
if inst.opcode == "invoke":
label = inst.operands[0]
assert isinstance(label, IRLabel)
fn = self.function.ctx.get_function(label)
curr.addmany(self.mem_allocator.mems_used[fn])
self.used[inst] = curr.copy()
return before != curr
def _find_base_ptrs(self, op: Optional[IROperand]) -> set[IRAbstractMemLoc]:
if op is None:
return set()
if isinstance(op, IRAbstractMemLoc):
return {op}
if not isinstance(op, IRVariable):
return set()
inst = self.dfg.get_producing_instruction(op)
assert inst is not None
if inst.opcode == "gep":
mem = inst.operands[0]
return self._find_base_ptrs(mem)
elif inst.opcode == "assign":
mem = inst.operands[0]
return self._find_base_ptrs(mem)
elif inst.opcode == "phi":
res = set()
for _, var in inst.phi_operands:
src = self._find_base_ptrs(var)
res.update(src)
return res
return set()
| MemLiveness |
python | huggingface__transformers | src/transformers/models/nllb_moe/modeling_nllb_moe.py | {
"start": 19478,
"end": 24110
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: Optional[float] = 0.0,
is_decoder: Optional[bool] = False,
bias: Optional[bool] = True,
is_causal: Optional[bool] = False,
config: Optional[NllbMoeConfig] = None,
layer_idx: Optional[int] = None,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
is_cross_attention = key_value_states is not None
bsz, tgt_len = hidden_states.shape[:-1]
src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
q_input_shape = (bsz, tgt_len, -1, self.head_dim)
kv_input_shape = (bsz, src_len, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_layer from cache
curr_past_key_values = past_key_values.cross_attention_cache
else:
curr_past_key_values = past_key_values.self_attention_cache
else:
curr_past_key_values = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_states = curr_past_key_values.layers[self.layer_idx].keys
value_states = curr_past_key_values.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2)
value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2)
if past_key_values is not None:
# save all key/value_states to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_values.update(
key_states, value_states, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
| NllbMoeAttention |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/utils.py | {
"start": 19307,
"end": 22188
} | class ____:
def __init__(
self,
*,
include_names: Sequence[str] | None = None,
include_types: Sequence[str] | None = None,
include_tags: Sequence[str] | None = None,
exclude_names: Sequence[str] | None = None,
exclude_types: Sequence[str] | None = None,
exclude_tags: Sequence[str] | None = None,
) -> None:
"""Utility to filter the root event in the astream_events implementation.
This is simply binding the arguments to the namespace to make save on
a bit of typing in the astream_events implementation.
"""
self.include_names = include_names
self.include_types = include_types
self.include_tags = include_tags
self.exclude_names = exclude_names
self.exclude_types = exclude_types
self.exclude_tags = exclude_tags
def include_event(self, event: StreamEvent, root_type: str) -> bool:
"""Determine whether to include an event."""
if (
self.include_names is None
and self.include_types is None
and self.include_tags is None
):
include = True
else:
include = False
event_tags = event.get("tags") or []
if self.include_names is not None:
include = include or event["name"] in self.include_names
if self.include_types is not None:
include = include or root_type in self.include_types
if self.include_tags is not None:
include = include or any(tag in self.include_tags for tag in event_tags)
if self.exclude_names is not None:
include = include and event["name"] not in self.exclude_names
if self.exclude_types is not None:
include = include and root_type not in self.exclude_types
if self.exclude_tags is not None:
include = include and all(
tag not in self.exclude_tags for tag in event_tags
)
return include
def is_async_generator(
func: Any,
) -> TypeGuard[Callable[..., AsyncIterator]]:
"""Check if a function is an async generator.
Args:
func: The function to check.
Returns:
`True` if the function is an async generator, `False` otherwise.
"""
return inspect.isasyncgenfunction(func) or (
hasattr(func, "__call__") # noqa: B004
and inspect.isasyncgenfunction(func.__call__)
)
def is_async_callable(
func: Any,
) -> TypeGuard[Callable[..., Awaitable]]:
"""Check if a function is async.
Args:
func: The function to check.
Returns:
`True` if the function is async, `False` otherwise.
"""
return asyncio.iscoroutinefunction(func) or (
hasattr(func, "__call__") # noqa: B004
and asyncio.iscoroutinefunction(func.__call__)
)
| _RootEventFilter |
python | numba__numba | numba/cuda/tests/cudapy/cache_usecases.py | {
"start": 5036,
"end": 5834
} | class ____(CUDATestCase):
"""
Tests for functionality of this module's functions.
Note this does not define any "test_*" method, instead check_module()
should be called by hand.
"""
def check_module(self, mod):
self.assertPreciseEqual(mod.add_usecase(2, 3), 6)
self.assertPreciseEqual(mod.outer_uncached(3, 2), 2)
self.assertPreciseEqual(mod.outer(3, 2), 2)
packed_rec = mod.record_return_packed(mod.packed_arr, 1)
self.assertPreciseEqual(tuple(packed_rec), (2, 43.5))
aligned_rec = mod.record_return_aligned(mod.aligned_arr, 1)
self.assertPreciseEqual(tuple(aligned_rec), (2, 43.5))
mod.simple_usecase_caller(2)
def self_test():
mod = sys.modules[__name__]
_TestModule().check_module(mod)
| _TestModule |
python | scipy__scipy | scipy/optimize/_nonlin.py | {
"start": 18449,
"end": 19328
} | class ____(Jacobian):
# generic type compatibility with scipy-stubs
__class_getitem__ = classmethod(GenericAlias)
def setup(self, x0, f0, func):
Jacobian.setup(self, x0, f0, func)
self.last_f = f0
self.last_x = x0
if hasattr(self, 'alpha') and self.alpha is None:
# Autoscale the initial Jacobian parameter
# unless we have already guessed the solution.
normf0 = norm(f0)
if normf0:
self.alpha = 0.5*max(norm(x0), 1) / normf0
else:
self.alpha = 1.0
def _update(self, x, f, dx, df, dx_norm, df_norm):
raise NotImplementedError
def update(self, x, f):
df = f - self.last_f
dx = x - self.last_x
self._update(x, f, dx, df, norm(dx), norm(df))
self.last_f = f
self.last_x = x
| GenericBroyden |
python | tornadoweb__tornado | tornado/test/locale_test.py | {
"start": 179,
"end": 3040
} | class ____(unittest.TestCase):
# TODO: less hacky way to get isolated tests
SAVE_VARS = ["_translations", "_supported_locales", "_use_gettext"]
def clear_locale_cache(self):
tornado.locale.Locale._cache = {}
def setUp(self):
self.saved = {} # type: dict
for var in TranslationLoaderTest.SAVE_VARS:
self.saved[var] = getattr(tornado.locale, var)
self.clear_locale_cache()
def tearDown(self):
for k, v in self.saved.items():
setattr(tornado.locale, k, v)
self.clear_locale_cache()
def test_csv(self):
tornado.locale.load_translations(
os.path.join(os.path.dirname(__file__), "csv_translations")
)
locale = tornado.locale.get("fr_FR")
self.assertTrue(isinstance(locale, tornado.locale.CSVLocale))
self.assertEqual(locale.translate("school"), "\u00e9cole")
def test_csv_bom(self):
with open(
os.path.join(os.path.dirname(__file__), "csv_translations", "fr_FR.csv"),
"rb",
) as f:
char_data = to_unicode(f.read())
# Re-encode our input data (which is utf-8 without BOM) in
# encodings that use the BOM and ensure that we can still load
# it. Note that utf-16-le and utf-16-be do not write a BOM,
# so we only test whichver variant is native to our platform.
for encoding in ["utf-8-sig", "utf-16"]:
tmpdir = tempfile.mkdtemp()
try:
with open(os.path.join(tmpdir, "fr_FR.csv"), "wb") as f:
f.write(char_data.encode(encoding))
tornado.locale.load_translations(tmpdir)
locale = tornado.locale.get("fr_FR")
self.assertIsInstance(locale, tornado.locale.CSVLocale)
self.assertEqual(locale.translate("school"), "\u00e9cole")
finally:
shutil.rmtree(tmpdir)
def test_gettext(self):
tornado.locale.load_gettext_translations(
os.path.join(os.path.dirname(__file__), "gettext_translations"),
"tornado_test",
)
locale = tornado.locale.get("fr_FR")
self.assertTrue(isinstance(locale, tornado.locale.GettextLocale))
self.assertEqual(locale.translate("school"), "\u00e9cole")
self.assertEqual(locale.pgettext("law", "right"), "le droit")
self.assertEqual(locale.pgettext("good", "right"), "le bien")
self.assertEqual(locale.pgettext("organization", "club", "clubs", 1), "le club")
self.assertEqual(
locale.pgettext("organization", "club", "clubs", 2), "les clubs"
)
self.assertEqual(locale.pgettext("stick", "club", "clubs", 1), "le b\xe2ton")
self.assertEqual(locale.pgettext("stick", "club", "clubs", 2), "les b\xe2tons")
| TranslationLoaderTest |
python | realpython__materials | instance-class-static-methods/pizza.py | {
"start": 0,
"end": 856
} | class ____:
def __init__(self, toppings):
self.toppings = list(toppings)
def __repr__(self):
return f"Pizza({self.toppings})"
def add_topping(self, topping):
self.toppings.append(topping)
def remove_topping(self, topping):
if topping in self.toppings:
self.toppings.remove(topping)
@classmethod
def margherita(cls):
return cls(["mozzarella", "tomatoes"])
@classmethod
def prosciutto(cls):
return cls(["mozzarella", "tomatoes", "ham"])
@staticmethod
def get_size_in_inches(size):
"""Returns an approximate diameter in inches for common sizes."""
size_map = {"small": 8, "medium": 12, "large": 16}
return size_map.get(size, "Unknown size")
if __name__ == "__main__":
a_pizza = Pizza.margherita()
print(a_pizza, "😋🍕")
| Pizza |
python | pypa__setuptools | pkg_resources/__init__.py | {
"start": 116865,
"end": 116990
} | class ____(packaging.requirements.InvalidRequirement):
"Compatibility wrapper for InvalidRequirement"
| RequirementParseError |
python | weaviate__weaviate-python-client | weaviate/collections/classes/aggregate.py | {
"start": 1627,
"end": 2043
} | class ____:
"""The aggregation result for a date property."""
count: Optional[int]
maximum: Optional[str]
median: Optional[str]
minimum: Optional[str]
mode: Optional[str]
AggregateResult = Union[
AggregateInteger,
AggregateNumber,
AggregateText,
AggregateBoolean,
AggregateDate,
AggregateReference,
]
AProperties = Dict[str, AggregateResult]
@dataclass
| AggregateDate |
python | getsentry__sentry | src/sentry/sentry_metrics/consumers/last_seen_updater.py | {
"start": 1078,
"end": 1397
} | class ____:
"""
A filter over messages coming from a stream. Can be used to pre filter
messages during consumption but potentially for other use cases as well.
"""
@abstractmethod
def should_drop(self, message: Message[KafkaPayload]) -> bool:
raise NotImplementedError
| StreamMessageFilter |
python | scrapy__scrapy | scrapy/pipelines/files.py | {
"start": 9355,
"end": 12131
} | class ____:
GCS_PROJECT_ID = None
CACHE_CONTROL = "max-age=172800"
# The bucket's default object ACL will be applied to the object.
# Overridden from settings.FILES_STORE_GCS_ACL in FilesPipeline.from_crawler().
POLICY = None
def __init__(self, uri: str):
from google.cloud import storage # noqa: PLC0415
client = storage.Client(project=self.GCS_PROJECT_ID)
bucket, prefix = uri[5:].split("/", 1)
self.bucket = client.bucket(bucket)
self.prefix: str = prefix
permissions = self.bucket.test_iam_permissions(
["storage.objects.get", "storage.objects.create"]
)
if "storage.objects.get" not in permissions:
logger.warning(
"No 'storage.objects.get' permission for GSC bucket %(bucket)s. "
"Checking if files are up to date will be impossible. Files will be downloaded every time.",
{"bucket": bucket},
)
if "storage.objects.create" not in permissions:
logger.error(
"No 'storage.objects.create' permission for GSC bucket %(bucket)s. Saving files will be impossible!",
{"bucket": bucket},
)
def stat_file(
self, path: str, info: MediaPipeline.SpiderInfo
) -> Deferred[StatInfo]:
def _onsuccess(blob) -> StatInfo:
if blob:
checksum = base64.b64decode(blob.md5_hash).hex()
last_modified = time.mktime(blob.updated.timetuple())
return {"checksum": checksum, "last_modified": last_modified}
return {}
blob_path = self._get_blob_path(path)
return cast(
"Deferred[StatInfo]",
deferToThread(self.bucket.get_blob, blob_path).addCallback(_onsuccess),
)
def _get_content_type(self, headers: dict[str, str] | None) -> str:
if headers and "Content-Type" in headers:
return headers["Content-Type"]
return "application/octet-stream"
def _get_blob_path(self, path: str) -> str:
return self.prefix + path
def persist_file(
self,
path: str,
buf: BytesIO,
info: MediaPipeline.SpiderInfo,
meta: dict[str, Any] | None = None,
headers: dict[str, str] | None = None,
) -> Deferred[Any]:
blob_path = self._get_blob_path(path)
blob = self.bucket.blob(blob_path)
blob.cache_control = self.CACHE_CONTROL
blob.metadata = {k: str(v) for k, v in (meta or {}).items()}
return deferToThread(
blob.upload_from_string,
data=buf.getvalue(),
content_type=self._get_content_type(headers),
predefined_acl=self.POLICY,
)
| GCSFilesStore |
python | realpython__materials | asterioids-pygame-project/source_code_step_7/space_rocks/models.py | {
"start": 800,
"end": 1947
} | class ____(GameObject):
MANEUVERABILITY = 3
ACCELERATION = 0.25
BULLET_SPEED = 3
def __init__(self, position, create_bullet_callback):
self.create_bullet_callback = create_bullet_callback
# Make a copy of the original UP vector
self.direction = Vector2(UP)
super().__init__(position, load_sprite("spaceship"), Vector2(0))
def rotate(self, clockwise=True):
sign = 1 if clockwise else -1
angle = self.MANEUVERABILITY * sign
self.direction.rotate_ip(angle)
def accelerate(self):
self.velocity += self.direction * self.ACCELERATION
def draw(self, surface):
angle = self.direction.angle_to(UP)
rotated_surface = rotozoom(self.sprite, angle, 1.0)
rotated_surface_size = Vector2(rotated_surface.get_size())
blit_position = self.position - rotated_surface_size * 0.5
surface.blit(rotated_surface, blit_position)
def shoot(self):
bullet_velocity = self.direction * self.BULLET_SPEED + self.velocity
bullet = Bullet(self.position, bullet_velocity)
self.create_bullet_callback(bullet)
| Spaceship |
python | pypa__packaging | tests/test_requirements.py | {
"start": 17598,
"end": 20800
} | class ____:
def test_types_with_nothing(self) -> None:
# GIVEN
to_parse = "foobar"
# WHEN
req = Requirement(to_parse)
# THEN
assert isinstance(req.name, str)
assert isinstance(req.extras, set)
assert req.url is None
assert isinstance(req.specifier, SpecifierSet)
assert req.marker is None
def test_types_with_specifier_and_marker(self) -> None:
# GIVEN
to_parse = "foobar[quux]<2,>=3; os_name=='a'"
# WHEN
req = Requirement(to_parse)
# THEN
assert isinstance(req.name, str)
assert isinstance(req.extras, set)
assert req.url is None
assert isinstance(req.specifier, SpecifierSet)
assert isinstance(req.marker, Marker)
def test_types_with_url(self) -> None:
req = Requirement("foobar @ http://foo.com")
assert isinstance(req.name, str)
assert isinstance(req.extras, set)
assert isinstance(req.url, str)
assert isinstance(req.specifier, SpecifierSet)
assert req.marker is None
@pytest.mark.parametrize(
"url_or_specifier",
["", " @ https://url ", "!=2.0", "==2.*"],
)
@pytest.mark.parametrize("extras", ["", "[a]", "[a,b]", "[a1,b1,b2]"])
@pytest.mark.parametrize(
"marker",
["", '; python_version == "3.11"', '; "3." not in python_version'],
)
def test_str_and_repr(
self, extras: str, url_or_specifier: str, marker: str
) -> None:
# GIVEN
to_parse = f"name{extras}{url_or_specifier}{marker}"
# WHEN
req = Requirement(to_parse)
# THEN
assert str(req) == to_parse.strip()
assert repr(req) == f"<Requirement({to_parse.strip()!r})>"
@pytest.mark.parametrize(("dep1", "dep2"), EQUAL_DEPENDENCIES)
def test_equal_reqs_equal_hashes(self, dep1: str, dep2: str) -> None:
"""Requirement objects created from equal strings should be equal."""
# GIVEN / WHEN
req1, req2 = Requirement(dep1), Requirement(dep2)
assert req1 == req2
assert hash(req1) == hash(req2)
@pytest.mark.parametrize(("dep1", "dep2"), EQUIVALENT_DEPENDENCIES)
def test_equivalent_reqs_equal_hashes_unequal_strings(
self, dep1: str, dep2: str
) -> None:
"""Requirement objects created from equivalent strings should be equal,
even though their string representation will not."""
# GIVEN / WHEN
req1, req2 = Requirement(dep1), Requirement(dep2)
assert req1 == req2
assert hash(req1) == hash(req2)
assert str(req1) != str(req2)
@pytest.mark.parametrize(("dep1", "dep2"), DIFFERENT_DEPENDENCIES)
def test_different_reqs_different_hashes(self, dep1: str, dep2: str) -> None:
"""Requirement objects created from non-equivalent strings should differ."""
# GIVEN / WHEN
req1, req2 = Requirement(dep1), Requirement(dep2)
# THEN
assert req1 != req2
assert hash(req1) != hash(req2)
def test_compare_with_string(self) -> None:
assert Requirement("packaging>=21.3") != "packaging>=21.3"
| TestRequirementBehaviour |
python | encode__django-rest-framework | rest_framework/generics.py | {
"start": 8025,
"end": 8447
} | class ____(mixins.ListModelMixin,
mixins.CreateModelMixin,
GenericAPIView):
"""
Concrete view for listing a queryset or creating a model instance.
"""
def get(self, request, *args, **kwargs):
return self.list(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
| ListCreateAPIView |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.