language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | src/sentry/search/eap/columns.py | {
"start": 1496,
"end": 3462
} | class ____:
# The alias for this column
public_alias: (
str # `p95() as foo` has the public alias `foo` and `p95()` has the public alias `p95()`
)
# The public type for this column
search_type: constants.SearchType
# The internal rpc type for this column, optional as it can mostly be inferred from search_type
internal_type: AttributeKey.Type.ValueType | None = None
# Processor is the function run in the post process step to transform a row into the final result
processor: Callable[[Any], Any] | None = None
# Validator to check if the value in a query is correct
validator: Callable[[Any], bool] | list[Callable[[Any], bool]] | None = None
# Indicates this attribute is a secondary alias for the attribute.
# It exists for compatibility or convenience reasons and should NOT be preferred.
secondary_alias: bool = False
def process_column(self, value: Any) -> Any:
"""Given the value from results, return a processed value if a processor is defined otherwise return it"""
if self.processor:
return self.processor(value)
return value
def validate(self, value: Any) -> None:
if callable(self.validator):
if self.validator(value):
return
raise InvalidSearchQuery(f"{value} is an invalid value for {self.public_alias}")
elif isinstance(self.validator, Iterable):
for validator in self.validator:
if validator(value):
return
raise InvalidSearchQuery(f"{value} is an invalid value for {self.public_alias}")
@property
def proto_type(self) -> AttributeKey.Type.ValueType:
"""The proto's AttributeKey type for this column"""
if self.internal_type is not None:
return self.internal_type
else:
return constants.TYPE_MAP[self.search_type]
@dataclass(frozen=True, kw_only=True)
| ResolvedColumn |
python | tensorflow__tensorflow | tensorflow/python/training/basic_session_run_hooks_test.py | {
"start": 3058,
"end": 4994
} | class ____(test.TestCase):
@test_util.run_deprecated_v1
def test_raise_in_both_secs_and_steps(self):
with self.assertRaises(ValueError):
basic_session_run_hooks.SecondOrStepTimer(every_secs=2.0, every_steps=10)
@test_util.run_deprecated_v1
def test_raise_in_none_secs_and_steps(self):
with self.assertRaises(ValueError):
basic_session_run_hooks.SecondOrStepTimer()
@test.mock.patch.object(time, 'time')
def test_every_secs(self, mock_time):
mock_time.return_value = MOCK_START_TIME
timer = basic_session_run_hooks.SecondOrStepTimer(every_secs=1.0)
self.assertTrue(timer.should_trigger_for_step(1))
timer.update_last_triggered_step(1)
self.assertFalse(timer.should_trigger_for_step(1))
self.assertFalse(timer.should_trigger_for_step(2))
mock_time.return_value += 1.0
self.assertFalse(timer.should_trigger_for_step(1))
self.assertTrue(timer.should_trigger_for_step(2))
def test_every_steps(self):
timer = basic_session_run_hooks.SecondOrStepTimer(every_steps=3)
self.assertTrue(timer.should_trigger_for_step(1))
timer.update_last_triggered_step(1)
self.assertFalse(timer.should_trigger_for_step(1))
self.assertFalse(timer.should_trigger_for_step(2))
self.assertFalse(timer.should_trigger_for_step(3))
self.assertTrue(timer.should_trigger_for_step(4))
def test_update_last_triggered_step(self):
timer = basic_session_run_hooks.SecondOrStepTimer(every_steps=1)
elapsed_secs, elapsed_steps = timer.update_last_triggered_step(1)
self.assertEqual(None, elapsed_secs)
self.assertEqual(None, elapsed_steps)
elapsed_secs, elapsed_steps = timer.update_last_triggered_step(5)
self.assertLess(0, elapsed_secs)
self.assertEqual(4, elapsed_steps)
elapsed_secs, elapsed_steps = timer.update_last_triggered_step(7)
self.assertLess(0, elapsed_secs)
self.assertEqual(2, elapsed_steps)
| SecondOrStepTimerTest |
python | allegroai__clearml | clearml/backend_api/services/v2_9/models.py | {
"start": 93172,
"end": 99540
} | class ____(Request):
"""
Create or update a new model for a task
:param task: Task id
:type task: str
:param uri: URI for the model. Exactly one of uri or override_model_id is a
required.
:type uri: str
:param name: Model name Unique within the company.
:type name: str
:param comment: Model comment
:type comment: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
:param override_model_id: Override model ID. If provided, this model is updated
in the task. Exactly one of override_model_id or uri is required.
:type override_model_id: str
:param iteration: Iteration (used to update task statistics)
:type iteration: int
"""
_service = "models"
_action = "update_for_task"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"comment": {"description": "Model comment", "type": "string"},
"iteration": {
"description": "Iteration (used to update task statistics)",
"type": "integer",
},
"name": {
"description": "Model name Unique within the company.",
"type": "string",
},
"override_model_id": {
"description": "Override model ID. If provided, this model is updated in the task. Exactly one of override_model_id or uri is required.",
"type": "string",
},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
"task": {"description": "Task id", "type": "string"},
"uri": {
"description": "URI for the model. Exactly one of uri or override_model_id is a required.",
"type": "string",
},
},
"required": ["task"],
"type": "object",
}
def __init__(
self,
task: str,
uri: Optional[str] = None,
name: Optional[str] = None,
comment: Optional[str] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
override_model_id: Optional[str] = None,
iteration: Optional[int] = None,
**kwargs: Any
) -> None:
super(UpdateForTaskRequest, self).__init__(**kwargs)
self.task = task
self.uri = uri
self.name = name
self.comment = comment
self.tags = tags
self.system_tags = system_tags
self.override_model_id = override_model_id
self.iteration = iteration
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("uri")
def uri(self) -> Optional[str]:
return self._property_uri
@uri.setter
def uri(self, value: Optional[str]) -> None:
if value is None:
self._property_uri = None
return
self.assert_isinstance(value, "uri", six.string_types)
self._property_uri = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("comment")
def comment(self) -> Optional[str]:
return self._property_comment
@comment.setter
def comment(self, value: Optional[str]) -> None:
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("override_model_id")
def override_model_id(self) -> Optional[str]:
return self._property_override_model_id
@override_model_id.setter
def override_model_id(self, value: Optional[str]) -> None:
if value is None:
self._property_override_model_id = None
return
self.assert_isinstance(value, "override_model_id", six.string_types)
self._property_override_model_id = value
@schema_property("iteration")
def iteration(self) -> Optional[int]:
return self._property_iteration
@iteration.setter
def iteration(self, value: Optional[int]) -> None:
if value is None:
self._property_iteration = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "iteration", six.integer_types)
self._property_iteration = value
| UpdateForTaskRequest |
python | kamyu104__LeetCode-Solutions | Python/reduction-operations-to-make-the-array-elements-equal.py | {
"start": 33,
"end": 369
} | class ____(object):
def reductionOperations(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.sort()
result = curr = 0
for i in xrange(1, len(nums)):
if nums[i-1] < nums[i]:
curr += 1
result += curr
return result
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-snowflake-cortex/integration_tests/integration_test.py | {
"start": 406,
"end": 15223
} | class ____(BaseIntegrationTest):
def _init_snowflake_cortex(self):
pass
def setUp(self):
with open("secrets/config.json", "r") as f:
self.config = json.loads(f.read())
self._init_snowflake_cortex()
def tearDown(self):
pass
def test_check_valid_config(self):
outcome = DestinationSnowflakeCortex().check(logging.getLogger("airbyte"), self.config)
assert outcome.status == Status.SUCCEEDED
def test_check_invalid_config(self):
outcome = DestinationSnowflakeCortex().check(
logging.getLogger("airbyte"),
{
"processing": {
"text_fields": ["str_col"],
"chunk_size": 1000,
"metadata_fields": ["int_col"],
},
"embedding": {"mode": "openai", "openai_key": "mykey"},
"indexing": {
"host": "MYACCOUNT",
"role": "MYUSERNAME",
"warehouse": "MYWAREHOUSE",
"database": "MYDATABASE",
"default_schema": "MYSCHEMA",
"username": "MYUSERNAME",
"credentials": {"password": "xxxxxxx"},
},
},
)
assert outcome.status == Status.FAILED
def _get_db_connection(self):
return connector.connect(
account=self.config["indexing"]["host"],
role=self.config["indexing"]["role"],
warehouse=self.config["indexing"]["warehouse"],
database=self.config["indexing"]["database"],
schema=self.config["indexing"]["default_schema"],
user=self.config["indexing"]["username"],
password=self.config["indexing"]["credentials"]["password"],
)
def _get_record_count(self, table_name):
"""Return the number of records in the table."""
conn = self._get_db_connection()
cursor = conn.cursor()
cursor.execute(f"SELECT COUNT(*) FROM {table_name};")
result = cursor.fetchone()
cursor.close()
conn.close()
return result[0]
def _get_all_records(self, table_name) -> list[dict[str, Any]]:
"""Return all records from the table as a list of dictionaries."""
conn = self._get_db_connection()
cursor = conn.cursor()
cursor.execute(f"SELECT * FROM {table_name};")
column_names = [desc[0] for desc in cursor.description]
result: list[dict[str, Any]] = []
for row in cursor.fetchall():
result.append(dict(zip(column_names, row)))
cursor.close()
conn.close()
return result
def _delete_table(self, table_name):
conn = self._get_db_connection()
cursor = conn.cursor()
cursor.execute(f"DROP TABLE IF EXISTS {table_name};")
conn.commit()
conn.close()
def _run_cosine_similarity(self, query_vector, table_name):
conn = self._get_db_connection()
cursor = conn.cursor()
query = f"""
SELECT DOCUMENT_CONTENT
FROM {table_name}
ORDER BY VECTOR_L2_DISTANCE(
CAST({query_vector} AS VECTOR(FLOAT, 1536)),
embedding
)
LIMIT 1
"""
cursor.execute(query)
result = cursor.fetchone()
cursor.close()
conn.close()
return result
def test_write(self):
self._delete_table("mystream")
catalog = self._get_configured_catalog(DestinationSyncMode.overwrite)
first_state_message = self._state({"state": "1"})
first_record = [
self._record(
stream="mystream",
str_value=f"Dogs are number {i}",
int_value=i,
)
for i in range(5)
]
# initial sync with replace
destination = DestinationSnowflakeCortex()
_ = list(
destination.write(
config=self.config,
configured_catalog=catalog,
input_messages=[*first_record, first_state_message],
)
)
assert self._get_record_count("mystream") == 5
# subsequent sync with append
append_catalog = self._get_configured_catalog(DestinationSyncMode.append)
list(
destination.write(
config=self.config,
configured_catalog=append_catalog,
input_messages=[self._record("mystream", "Cats are nice", 6), first_state_message],
)
)
assert self._get_record_count("mystream") == 6
def test_write_and_replace(self):
self._delete_table("mystream")
catalog = self._get_configured_catalog(DestinationSyncMode.overwrite)
first_state_message = self._state({"state": "1"})
first_five_records = [
self._record(
stream="mystream",
str_value=f"Dogs are number {i}",
int_value=i,
)
for i in range(5)
]
# initial sync with replace
destination = DestinationSnowflakeCortex()
list(
destination.write(
config=self.config,
configured_catalog=catalog,
input_messages=[*first_five_records, first_state_message],
)
)
assert self._get_record_count("mystream") == 5
# subsequent sync with append
append_catalog = self._get_configured_catalog(DestinationSyncMode.append)
list(
destination.write(
config=self.config,
configured_catalog=append_catalog,
input_messages=[self._record("mystream", "Cats are nice", 6), first_state_message],
)
)
assert self._get_record_count("mystream") == 6
# subsequent sync with append_dedup
append_dedup_catalog = self._get_configured_catalog(DestinationSyncMode.append_dedup)
list(
destination.write(
config=self.config,
configured_catalog=append_dedup_catalog,
input_messages=[
self._record("mystream", "Cats are nice too", 4),
first_state_message,
],
)
)
# TODO: FIXME: This should be 6, but it's 7 because the deduplication is not working
assert self._get_record_count("mystream") == 6
# comment the following so we can use fake for testing
# embeddings = OpenAIEmbeddings(openai_api_key=self.config["embedding"]["openai_key"])
# result = self._run_cosine_similarity(embeddings.embed_query("feline animals"), "mystream")
# assert(len(result) == 1)
# result[0] == "str_col: Cats are nice"
def test_overwrite_mode_deletes_records(self):
self._delete_table("mystream")
catalog = self._get_configured_catalog(DestinationSyncMode.overwrite)
first_state_message = self._state({"state": "1"})
first_four_records = [
self._record(
stream="mystream",
str_value=f"Dogs are number {i}",
int_value=i,
)
for i in range(4)
]
# initial sync with replace
destination = DestinationSnowflakeCortex()
list(destination.write(self.config, catalog, [*first_four_records, first_state_message]))
assert self._get_record_count("mystream") == 4
# following should replace existing records
append_catalog = self._get_configured_catalog(DestinationSyncMode.overwrite)
list(
destination.write(
config=self.config,
configured_catalog=append_catalog,
input_messages=[self._record("mystream", "Cats are nice", 6), first_state_message],
)
)
assert self._get_record_count("mystream") == 1
def test_record_write_fidelity(self):
self._delete_table("mystream")
catalog = self._get_configured_catalog(DestinationSyncMode.overwrite)
first_state_message = self._state({"state": "1"})
records = [
self._record(
stream="mystream",
str_value=f"Dogs are number {i}",
int_value=i,
)
for i in range(1)
]
# initial sync with replace
destination = DestinationSnowflakeCortex()
list(destination.write(self.config, catalog, [*records, first_state_message]))
assert self._get_record_count("mystream") == 1
first_written_record = self._get_all_records("mystream")[0]
assert list(first_written_record.keys()) == [
"DOCUMENT_ID",
"CHUNK_ID",
"METADATA",
"DOCUMENT_CONTENT",
"EMBEDDING",
]
assert first_written_record.pop("EMBEDDING")
assert first_written_record.pop("CHUNK_ID")
metadata = first_written_record.pop("METADATA")
_ = metadata
# TODO: Fix the data type issue here (currently stringified):
# assert isinstance(metadata, dict), f"METADATA should be a dict: {metadata}"
# assert metadata["int_col"] == 0
assert first_written_record == {
"DOCUMENT_ID": "Stream_mystream_Key_0",
"DOCUMENT_CONTENT": '"str_col: Dogs are number 0"',
}
def test_write_with_chunk_size_5(self):
self._delete_table("mystream")
self.config["processing"]["chunk_size"] = 5
catalog = self._get_configured_catalog(DestinationSyncMode.overwrite)
first_state_message = self._state({"state": "1"})
first_record = [
self._record(
stream="mystream",
str_value=f"Dogs are number {i}",
int_value=i,
)
for i in range(5)
]
# initial sync with replace
destination = DestinationSnowflakeCortex()
_ = list(
destination.write(
config=self.config,
configured_catalog=catalog,
input_messages=[*first_record, first_state_message],
)
)
assert self._get_record_count("mystream") == 15
# subsequent sync with append
append_catalog = self._get_configured_catalog(DestinationSyncMode.append)
list(
destination.write(
config=self.config,
configured_catalog=append_catalog,
input_messages=[self._record("mystream", "Cats are nice", 6), first_state_message],
)
)
assert self._get_record_count("mystream") == 18
# subsequent sync with append_dedup
append_dedup_catalog = self._get_configured_catalog(DestinationSyncMode.append_dedup)
list(
destination.write(
config=self.config,
configured_catalog=append_dedup_catalog,
input_messages=[
self._record("mystream", "Cats are nice too", 4),
first_state_message,
],
)
)
assert self._get_record_count("mystream") == 18
def test_write_fidelity_with_chunk_size_5(self):
self._delete_table("mystream")
self.config["processing"]["chunk_size"] = 5
catalog = self._get_configured_catalog(DestinationSyncMode.overwrite)
first_state_message = self._state({"state": "1"})
records = [
self._record(
stream="mystream",
str_value=f"Dogs are number {i}",
int_value=i,
)
for i in range(1)
]
# initial sync with replace
destination = DestinationSnowflakeCortex()
list(destination.write(self.config, catalog, [*records, first_state_message]))
assert self._get_record_count("mystream") == 3
first_written_record = self._get_all_records("mystream")[0]
second_written_record = self._get_all_records("mystream")[1]
third_written_record = self._get_all_records("mystream")[2]
assert list(first_written_record.keys()) == [
"DOCUMENT_ID",
"CHUNK_ID",
"METADATA",
"DOCUMENT_CONTENT",
"EMBEDDING",
]
assert first_written_record.pop("EMBEDDING")
assert first_written_record.pop("CHUNK_ID")
metadata = first_written_record.pop("METADATA")
_ = metadata
assert first_written_record == {
"DOCUMENT_ID": "Stream_mystream_Key_0",
"DOCUMENT_CONTENT": '"str_col:"',
}
assert second_written_record["DOCUMENT_ID"] == "Stream_mystream_Key_0"
assert second_written_record["DOCUMENT_CONTENT"] == '"Dogs are"'
assert third_written_record["DOCUMENT_ID"] == "Stream_mystream_Key_0"
assert third_written_record["DOCUMENT_CONTENT"] == '"number 0"'
"""
Following tests are not code specific, but are useful to confirm that the Cortex functions are available and behaving as expcected
"""
def test_cortex_functions_available(self):
conn = self._get_db_connection()
cursor = conn.cursor()
query = """
SELECT SNOWFLAKE.CORTEX.EXTRACT_ANSWER(
$$Apple Vision Pro comprises approximately 300 components.[40] It has a curved laminated glass display on the front, an aluminum frame on its sides, a flexible cushion on the inside, and a removable, adjustable headband. The frame contains five sensors, six microphones, and 12 cameras.$$,
'How many cameras are there on the product?'
) AS answer
"""
try:
cursor.execute(query)
except Exception as e:
self.fail(f"Cortex functions might not be available in database: {e}")
def test_get_embeddings_using_cortex(self):
conn = self._get_db_connection()
cur = conn.cursor()
document_content_list = ["dogs are number 1", "dogs are number 2", "cats are nummber 1"]
cur.execute("""
CREATE TEMPORARY TABLE temp_document_content (
document_content STRING
)
""")
cur.executemany(
"INSERT INTO temp_document_content (document_content) VALUES (%s)",
document_content_list,
)
cur.execute("""
SELECT snowflake.cortex.embed_text('e5-base-v2', document_content) AS embedding
FROM temp_document_content
""")
processed_data = cur.fetchall()
self.assertTrue(processed_data, "No data found in the database")
cur.execute("DROP TABLE temp_document_content")
cur.close()
conn.close()
| SnowflakeCortexIntegrationTest |
python | ansible__ansible | lib/ansible/_internal/_yaml/_errors.py | {
"start": 298,
"end": 470
} | class ____(ConstructorError):
"""Ansible-specific ConstructorError used to bypass exception analysis during wrapping in AnsibleYAMLParserError."""
| AnsibleConstructorError |
python | pydata__xarray | xarray/groupers.py | {
"start": 6186,
"end": 11190
} | class ____(Grouper):
"""
Grouper object for grouping by a categorical variable.
Parameters
----------
labels: array-like, optional
Group labels to aggregate on. This is required when grouping by a chunked array type
(e.g. dask or cubed) since it is used to construct the coordinate on the output.
Grouped operations will only be run on the specified group labels. Any group that is not
present in ``labels`` will be ignored.
"""
_group_as_index: pd.Index | None = field(default=None, repr=False, init=False)
labels: ArrayLike | None = field(default=None)
@property
def group_as_index(self) -> pd.Index:
"""Caches the group DataArray as a pandas Index."""
if self._group_as_index is None:
if self.group.ndim == 1:
self._group_as_index = self.group.to_index()
else:
self._group_as_index = pd.Index(np.array(self.group).ravel())
return self._group_as_index
def reset(self) -> Self:
return type(self)()
def factorize(self, group: T_Group) -> EncodedGroups:
self.group = group
if is_chunked_array(group.data) and self.labels is None:
raise ValueError(
"When grouping by a dask array, `labels` must be passed using "
"a UniqueGrouper object."
)
if self.labels is not None:
return self._factorize_given_labels(group)
index = self.group_as_index
is_unique_and_monotonic = isinstance(self.group, _DummyGroup) or (
index.is_unique
and (index.is_monotonic_increasing or index.is_monotonic_decreasing)
)
is_dimension = self.group.dims == (self.group.name,)
can_squeeze = is_dimension and is_unique_and_monotonic
if can_squeeze:
return self._factorize_dummy()
else:
return self._factorize_unique()
def _factorize_given_labels(self, group: T_Group) -> EncodedGroups:
codes = apply_ufunc(
_factorize_given_labels,
group,
kwargs={"labels": self.labels},
dask="parallelized",
output_dtypes=[np.int64],
keep_attrs=True,
)
return EncodedGroups(
codes=codes,
full_index=pd.Index(self.labels), # type: ignore[arg-type]
unique_coord=Variable(
dims=codes.name,
data=self.labels,
attrs=self.group.attrs,
),
)
def _factorize_unique(self) -> EncodedGroups:
# look through group to find the unique values
sort = not isinstance(self.group_as_index, pd.MultiIndex)
unique_values, codes_ = unique_value_groups(self.group_as_index, sort=sort)
if array_all(codes_ == -1):
raise ValueError(
"Failed to group data. Are you grouping by a variable that is all NaN?"
)
codes = self.group.copy(data=codes_.reshape(self.group.shape), deep=False)
unique_coord = Variable(
dims=codes.name, data=unique_values, attrs=self.group.attrs
)
full_index = (
unique_values
if isinstance(unique_values, pd.MultiIndex)
else pd.Index(unique_values)
)
return EncodedGroups(
codes=codes,
full_index=full_index,
unique_coord=unique_coord,
coords=coordinates_from_variable(unique_coord),
)
def _factorize_dummy(self) -> EncodedGroups:
size = self.group.size
# no need to factorize
# use slices to do views instead of fancy indexing
# equivalent to: group_indices = group_indices.reshape(-1, 1)
group_indices: GroupIndices = tuple(slice(i, i + 1) for i in range(size))
size_range = np.arange(size)
full_index: pd.Index
unique_coord: _DummyGroup | Variable
if isinstance(self.group, _DummyGroup):
codes = self.group.to_dataarray().copy(data=size_range)
unique_coord = self.group
full_index = pd.RangeIndex(self.group.size)
coords = Coordinates()
else:
codes = self.group.copy(data=size_range, deep=False)
unique_coord = self.group.variable.to_base_variable()
full_index = self.group_as_index
if isinstance(full_index, pd.MultiIndex):
coords = Coordinates.from_pandas_multiindex(
full_index, dim=self.group.name
)
else:
if TYPE_CHECKING:
assert isinstance(unique_coord, Variable)
coords = coordinates_from_variable(unique_coord)
return EncodedGroups(
codes=codes,
group_indices=group_indices,
full_index=full_index,
unique_coord=unique_coord,
coords=coords,
)
@dataclass
| UniqueGrouper |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/sensors/glue_crawler.py | {
"start": 1231,
"end": 3375
} | class ____(AwsBaseSensor[GlueCrawlerHook]):
"""
Waits for an AWS Glue crawler to reach any of the statuses below.
'FAILED', 'CANCELLED', 'SUCCEEDED'
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:GlueCrawlerSensor`
:param crawler_name: The AWS Glue crawler unique name
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
aws_hook_class = GlueCrawlerHook
template_fields: Sequence[str] = aws_template_fields(
"crawler_name",
)
def __init__(self, *, crawler_name: str, **kwargs) -> None:
super().__init__(**kwargs)
self.crawler_name = crawler_name
self.success_statuses = "SUCCEEDED"
self.errored_statuses = ("FAILED", "CANCELLED")
def poke(self, context: Context):
self.log.info("Poking for AWS Glue crawler: %s", self.crawler_name)
crawler_state = self.hook.get_crawler(self.crawler_name)["State"]
if crawler_state == "READY":
self.log.info("State: %s", crawler_state)
crawler_status = self.hook.get_crawler(self.crawler_name)["LastCrawl"]["Status"]
if crawler_status == self.success_statuses:
self.log.info("Status: %s", crawler_status)
return True
raise AirflowException(f"Status: {crawler_status}")
return False
| GlueCrawlerSensor |
python | scipy__scipy | scipy/spatial/distance.py | {
"start": 45615,
"end": 46501
} | class ____:
metric_name: str
def __call__(self, XA, XB, *, out=None, **kwargs):
XA = np.ascontiguousarray(XA)
XB = np.ascontiguousarray(XB)
mA, n = XA.shape
mB, _ = XB.shape
metric_name = self.metric_name
metric_info = _METRICS[metric_name]
XA, XB, typ, kwargs = _validate_cdist_input(
XA, XB, mA, mB, n, metric_info, **kwargs)
w = kwargs.pop('w', None)
if w is not None:
metric = metric_info.dist_func
return _cdist_callable(
XA, XB, metric=metric, out=out, w=w, **kwargs)
dm = _prepare_out_argument(out, np.float64, (mA, mB))
# get cdist wrapper
cdist_fn = getattr(_distance_wrap, f'cdist_{metric_name}_{typ}_wrap')
cdist_fn(XA, XB, dm, **kwargs)
return dm
@dataclasses.dataclass(frozen=True)
| CDistMetricWrapper |
python | has2k1__plotnine | tests/test_layout.py | {
"start": 691,
"end": 4992
} | class ____:
g = (
ggplot(mtcars, aes(x="wt", y="mpg", color="factor(gear)"))
+ geom_point()
+ labs( # New
x="Weight",
y="Miles Per Gallon",
title="Relationship between Weight and Fuel Efficiency in Cars",
subtitle="Should we be driving lighter cars?",
caption=(
"The plot shows a negative correlation between car weight\n"
"and fuel efficiency, with lighter cars generally achieving\n"
"higher miles per gallon"
),
)
)
def test_default(self):
assert self.g == "default"
def test_axis_title_x_justification(self):
p = self.g + theme(axis_title_x=element_text(ha=0.2))
assert p == "axis_title_x_justification"
def test_axis_title_y_justification(self):
p = self.g + theme(axis_title_y=element_text(va=0.8))
assert p == "axis_title_y_justification"
def test_plot_title_justification(self):
p = self.g + theme(plot_title=element_text(ha=1))
assert p == "plot_title_justification"
def test_legend_on_top(self):
p = self.g + theme(legend_position="top")
assert p == "legend_at_top"
def test_legend_on_the_left(self):
p = self.g + theme(legend_position="left")
assert p == "legend_on_the_left"
def test_legend_at_the_bottom(self):
p = self.g + theme(legend_position="bottom")
assert p == "legend_at_the_bottom"
def test_turn_off_guide(self):
p1 = self.g + theme(legend_position="none")
p2 = self.g + guides(color="none")
p3 = self.g + guides(color=False)
assert p1 == "turn_off_guide"
assert p2 == "turn_off_guide"
assert p3 == "turn_off_guide"
def test_legends_in_different_positions(self):
p = (
self.g
+ aes(color="gear", fill="am", shape="factor(cyl)", alpha="vs")
+ guides(
shape=guide_legend(position="bottom"),
color=guide_legend(position="left"),
alpha=guide_legend(position="left"),
)
)
assert p == "legends_in_different_positions"
def test_facet_grid(self):
p = self.g + facet_grid("am", "gear")
assert p == "facet_grid"
def test_facet_wrap(self):
p = self.g + facet_wrap("carb", nrow=2)
assert p == "facet_wrap"
def test_facet_wrap_scales_free(self):
p = self.g + facet_wrap("carb", scales="free")
assert p == "facet_wrap_scales_free"
def test_plot_margin_aspect_ratio(self):
# The margin should be exact in both directions even if
# the figure has an aspect ratio != 1.
p = (
ggplot()
+ geom_blank()
+ theme(plot_margin=0.025, figure_size=(4, 3))
)
assert p == "plot_margin_aspect_ratio"
def test_plot_margin_protruding_axis_text(self):
data = pd.DataFrame({"x": np.arange(5), "y": np.arange(5) - 0.2})
p = (
ggplot(data, aes("x", "y"))
+ geom_point()
+ scale_y_continuous(
labels=["0", "1", "2", "3", "four-four-four-four"]
)
+ labs(title="Protruding Axis Text")
+ theme(
axis_text_y=element_text(
rotation=(0, 0, 0, 0, 90),
color=("black", "black", "black", "black", "red"),
va="center",
),
)
)
assert p == "plot_margin_protruding_axis_text"
def test_colorbar_frame(self):
p = self.g + aes(color="gear") + black_frame
assert p == "colorbar_frame"
def test_different_colorbar_themes(self):
p = (
self.g
+ aes(color="gear", fill="am")
+ guides(
color=guide_colorbar(theme=black_frame),
fill=guide_colorbar(theme=red_frame),
)
)
assert p == "different_colorbar_themes"
def test_plot_titles_and_caption_positioning(self):
p = self.g + theme(
plot_title_position="plot",
plot_caption_position="plot",
)
assert p == "plot_titles_and_caption_positioning"
| TestLayout |
python | pytorch__pytorch | test/test_autograd.py | {
"start": 248298,
"end": 379190
} | class ____(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
return grad
# Run on cuda if it is available to ensure that the worker thread
# is properly initialized by the time we exit.
device = "cuda" if torch.cuda.is_available() else "cpu"
for shape in [(1,), ()]:
v = torch.ones(shape, requires_grad=True, device=device)
MyFunction.apply(v).backward()
"""
s = TestCase.runWithPytorchAPIUsageStderr(code)
# The autograd engine creates worker threads only when GPU devices are present.
# So make sure that we do shutdown threads when we're testing cuda and make sure
# that there is no thread to shutdown when we're not using cuda.
if TEST_CUDA or torch.backends.mps.is_available() or torch.xpu.is_available():
self.assertRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")
else:
self.assertNotRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")
@unittest.skipIf(
    IS_MACOS,
    "Fails with SIGBUS on macOS; https://github.com/pytorch/pytorch/issues/25941",
)
def test_deep_reentrant(self):
    """Deeply nested reentrant backward must not overflow the stack.

    Each backward of DeepReentrant launches another backward; the engine
    is expected to escape to fresh worker threads instead of recursing on
    a single thread's stack.
    """

    class DeepReentrant(Function):
        @staticmethod
        def forward(ctx, x):
            # Build a fresh graph inside forward so backward can recurse on it.
            with torch.enable_grad():
                ctx.x = Variable(x.detach(), requires_grad=True)
                ctx.x = ctx.x - 1
            return ctx.x.detach()

        @staticmethod
        def backward(ctx, x):
            # Recurse until the countdown saved in forward drops below zero.
            if ctx.x < 0:
                return x
            with torch.enable_grad():
                DeepReentrant.apply(ctx.x).sum().backward()
            return x

    # Test stack overflow escape mechanism
    v = torch.tensor(2000.0, requires_grad=True)
    # This will cause stack overflow if reentrant calls are handled
    # in the same thread recursively
    DeepReentrant.apply(v).sum().backward()

    # Test stack overflow escape mechanism multiple times
    # to ensure reusing workers in the pool works fine
    v2 = torch.tensor(200.0, requires_grad=True)
    DeepReentrant.apply(v2).sum().backward()
def test_reentrant_priority(self):
    """Tasks spawned by a reentrant backward run before previously queued
    same-graph-task work, regardless of sequence numbers."""
    order = []

    class MyFunction(Function):
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        def backward(ctx, x):
            order.append("MyFunction")
            return x

    class Reentrant(Function):
        @staticmethod
        def forward(ctx, x):
            with torch.enable_grad():
                ctx.x = Variable(x.detach(), requires_grad=True)
                ctx.x = ctx.x - 1
            return ctx.x.detach()

        @staticmethod
        def backward(ctx, x):
            order.append("Reentrant")
            if ctx.x < 0:
                return x
            with torch.enable_grad():
                Reentrant.apply(ctx.x).backward()
            return x

    # Starting at 9 gives 10 Reentrant backward calls (9 down to below 0).
    a = MyFunction.apply(torch.tensor(6.0, requires_grad=True))
    b = Reentrant.apply(torch.tensor(9.0, requires_grad=True))
    v = a * b
    v.backward()
    # The tasks for the Reentrant and MyFunction backward() will be added
    # to the queue in the autograd engine at the same time. The backward
    # for Reentrant will be executed first, which will then add other
    # backward tasks to the queue. We want to ensure all the reentrant tasks
    # are prioritized over the MyFunction backward task regardless of their
    # sequence numbers
    self.assertEqual(len(order), 11)
    self.assertEqual(order.count("Reentrant"), 10)
    self.assertEqual(order[-1], "MyFunction")
@slowTest
def test_checkpointing(self):
    """Smoke test: reentrant checkpoint over many inputs backprops cleanly."""
    num_inp = 2000
    nz_inp = 10
    nz_out = 10  # NOTE(review): unused — kept for symmetry; confirm before removing
    nz_bottleneck = 1000

    # small proxy network for some complex reasoning we want to do per input
    module = nn.Sequential(
        nn.Linear(nz_inp, nz_bottleneck),
        nn.ReLU(),
        nn.Linear(nz_bottleneck, nz_inp),
    )

    feat_combined = []
    for _ in range(num_inp):
        data_r = torch.empty(1, nz_inp)
        data_r.uniform_()
        data_r.requires_grad = True
        feat_r = checkpoint(module, data_r, use_reentrant=True)
        feat_combined.append(feat_r)

    # compute mean as a proxy for some joint reasoning
    mean_combined = torch.stack(feat_combined).mean()
    mean_combined.backward()
def _test_checkpointing_non_reentrant_autocast(self, device_type):
    """Helper: non-reentrant checkpoint must replay the ambient autocast
    state during recomputation so backward dtypes match the forward pass.

    Args:
        device_type: "cpu" or "cuda"; inputs are moved to CUDA for the latter.
    """
    for enabled in [True, False]:

        def foo(x, y, z):
            # torch.mm is on autocast's list of ops that should run in
            # the autocast precision
            x = torch.mm(x, y)
            y = torch.mm(x, z)
            z = torch.mm(z, z)
            expected_dtype = torch.float32 if not enabled else torch.bfloat16
            self.assertEqual(expected_dtype, z.dtype)
            return z

        x = torch.randn(3, 3, requires_grad=True)
        y = torch.randn(3, 3, requires_grad=True)
        z = torch.randn(3, 3, requires_grad=True)
        if device_type == "cuda":
            x = x.cuda()
            y = y.cuda()
            z = z.cuda()

        with torch.autocast(
            enabled=enabled, device_type=device_type, dtype=torch.bfloat16
        ):
            loss = checkpoint(foo, x, y, z, use_reentrant=False)
            loss = loss.sum()

        # Without saving + recasting the autocast type, would raise error in autograd
        # about mismatched dtypes.
        loss.backward()  # triggers recomputation to check it runs in bfloat
def test_checkpointing_non_reentrant_autocast_cpu(self):
    """Non-reentrant checkpoint recomputation must replay autocast state
    (dtype and enablement) on CPU.

    Thin wrapper delegating to the shared autocast helper.
    """
    self._test_checkpointing_non_reentrant_autocast("cpu")
@unittest.skipIf(
    not torch.cuda.is_available() or not torch.cuda.is_bf16_supported(),
    "Test requires CUDA bf16 support",
)
def test_checkpointing_non_reentrant_autocast_gpu(self):
    """Non-reentrant checkpoint recomputation must replay autocast state
    (dtype and enablement) on GPU.

    Thin wrapper delegating to the shared autocast helper.
    """
    self._test_checkpointing_non_reentrant_autocast("cuda")
@unittest.skipIf(not torch.cuda.is_available(), "Test requires CUDA")
@slowTest
def test_checkpointing_without_reentrant_memory_savings(self):
    """Both checkpoint flavors must have lower peak CUDA memory than the
    un-checkpointed baseline."""

    class MyModel(nn.Module):
        def __init__(self, n, use_checkpoint, use_reentrant):
            super().__init__()
            self.n = n
            self.use_checkpoint = use_checkpoint
            self.use_reentrant = use_reentrant
            self.layers = nn.ModuleList()
            for _ in range(self.n):
                layer = nn.Sequential(
                    nn.Linear(256, 256), nn.Linear(256, 256), nn.Linear(256, 256)
                )
                self.layers.append(layer)
            # pre-allocate the grad so that increased memory usage is mainly
            # due to activations.
            for layer in self.layers:
                for lin in layer:
                    lin.weight.grad = torch.ones_like(lin.weight)
                    lin.bias.grad = torch.ones_like(lin.bias)

        def forward(self, x):
            for i in range(self.n):
                if not self.use_checkpoint:
                    x = self.layers[i](x)
                else:
                    x = checkpoint(
                        self.layers[i], x, use_reentrant=self.use_reentrant
                    )
            return x

    model_no_checkpoint = MyModel(
        8, use_checkpoint=False, use_reentrant=False
    ).cuda()
    model_reentrant_checkpoint = MyModel(
        8, use_checkpoint=True, use_reentrant=True
    ).cuda()
    model_no_reentrant_checkpoint = MyModel(
        8, use_checkpoint=True, use_reentrant=False
    ).cuda()

    x = torch.randn(100, 256, requires_grad=True, device="cuda")

    # Peak-memory stats are reset before each run so the three
    # measurements are independent.
    torch.cuda.reset_peak_memory_stats()
    loss = model_no_checkpoint(x.clone()).sum()
    loss.backward()
    mem_no_checkpoint = torch.cuda.max_memory_allocated()

    torch.cuda.reset_peak_memory_stats()
    loss = model_reentrant_checkpoint(x.clone()).sum()
    loss.backward()
    mem_reentrant_checkpoint = torch.cuda.max_memory_allocated()

    torch.cuda.reset_peak_memory_stats()
    loss = model_no_reentrant_checkpoint(x.clone()).sum()
    loss.backward()
    mem_no_reentrant_checkpoint = torch.cuda.max_memory_allocated()

    self.assertTrue(mem_reentrant_checkpoint < mem_no_checkpoint)
    self.assertTrue(mem_no_reentrant_checkpoint < mem_no_checkpoint)
def test_checkpointing_without_reentrant_custom_function_works(self):
    """Saved tensors of a custom Function under non-reentrant checkpoint are
    recomputed lazily and cleared after the first unpack within a backward."""
    msg = "Unpack is being triggered for a tensor that was already unpacked once"

    class MyFunc(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, y, z):
            w = x * y * z
            out = w + w
            ctx.save_for_backward(x, y, z, w, out)
            return out

        @staticmethod
        def backward(ctx, grad_out):
            x, y, z, w, out = ctx.saved_tensors
            # Accessing the saved Tensors a second time will raise because
            # recomputed tensors get cleared as soon as they are unpacked.
            # A recomputation is only triggered if your backward has a new
            # graph-task id.
            with self.assertRaisesRegex(RuntimeError, msg):
                x_2, y_2, z_2, w_2, out_2 = ctx.saved_tensors
            return x, y, z

    x = torch.tensor(1.0, requires_grad=True)
    y = torch.tensor(2.0, requires_grad=True)
    z = torch.tensor(3.0, requires_grad=True)

    def foo(x, y, z):
        x = x * y * z
        y = y * y * z
        z = z * z
        out = MyFunc.apply(x, y, z)
        return out

    out = checkpoint(foo, x, y, z, use_reentrant=False)
    out.sum().backward()
def test_checkpointing_without_reentrant_with_context_fn(self):
    """context_fn supplies (forward, recompute) context managers; it is only
    supported with use_reentrant=False."""

    class VerboseTorchDispatchMode(TorchDispatchMode):
        # Records the name of every dispatched op while the mode is active.
        def __init__(self) -> None:
            self.operators = []

        def __torch_dispatch__(self, func, types, args=(), kwargs=None):
            if kwargs is None:
                kwargs = {}
            self.operators.append(func.__name__)
            return func(*args, **kwargs)

    x = torch.tensor(1.0, requires_grad=True)
    verbose_mode = VerboseTorchDispatchMode()

    # First element of the pair is active during the original forward.
    def context_fn():
        return verbose_mode, contextlib.nullcontext()

    out = checkpoint(
        lambda x: x.exp(), x, use_reentrant=False, context_fn=context_fn
    )
    self.assertEqual(verbose_mode.operators, ["exp.default"])

    verbose_mode.operators = []

    # Second element of the pair is active during recomputation (backward).
    def context_fn():
        return contextlib.nullcontext(), verbose_mode

    out = checkpoint(
        lambda x: x.exp(), x, use_reentrant=False, context_fn=context_fn
    )
    out.backward()
    self.assertEqual(verbose_mode.operators, ["exp.default", "detach.default"])

    with self.assertRaisesRegex(
        Exception, "only supported when use_reentrant=False"
    ):
        out = checkpoint(
            lambda x: x.sin(), x, use_reentrant=True, context_fn=context_fn
        )
def test_checkpoint_warns_if_use_reentrant_not_passed_explcitly(self):
    """Omitting use_reentrant must warn; passing it explicitly must not.

    NOTE(review): "explcitly" in the test name is a typo, but renaming
    would change the test id, so it is left as-is.
    """
    a = torch.randn(1, requires_grad=True)

    # Passing explicitly should not warn
    self.assertNotWarn(lambda: checkpoint(lambda x: x, a, use_reentrant=False))

    # Not passing explicitly warns
    with self.assertWarnsOnceRegex(
        UserWarning, ".*the use_reentrant parameter should be passed explicitly.*"
    ):
        checkpoint(lambda x: x, a)
def test_checkpoint_sequential_warns_if_use_reentrant_not_passed_explcitly(self):
    """checkpoint_sequential must warn when use_reentrant is not given.

    NOTE(review): "explcitly" in the test name is a typo, but renaming
    would change the test id, so it is left as-is.
    """
    a = torch.randn(3, requires_grad=True)
    modules_list = [
        torch.nn.Linear(3, 3),
        torch.nn.Linear(3, 3),
        torch.nn.Linear(3, 3),
    ]

    # Passing explicitly should not warn
    self.assertNotWarn(
        lambda: checkpoint_sequential(modules_list, 3, a, use_reentrant=False)
    )

    # Not passing explicitly warns
    with self.assertWarnsOnceRegex(
        UserWarning, ".*the use_reentrant parameter should be passed explicitly.*"
    ):
        checkpoint_sequential(modules_list, 3, a)
@skipIfTorchDynamo("GraphExecGroup does not support compile")
def test_checkpoint_graph_execution_group(self):
    """Inside a GraphExecGroup, disjoint backward calls share one
    recomputation; overlapping backward calls must be rejected."""

    def run(use_graph_execution_group):
        counter = [0]

        def fn(x):
            counter[0] += 1
            y = x.sin().cos()
            z = y.sin().cos()
            return y, z

        x = torch.randn(3, 3, requires_grad=True)
        y, z = checkpoint(fn, x, use_reentrant=False)

        group = torch.utils.checkpoint.GraphExecGroup()
        ctx = contextlib.nullcontext()
        if use_graph_execution_group:
            ctx = group

        with ctx:
            (grad_y,) = torch.autograd.grad(
                z, inputs=(y,), grad_outputs=(torch.ones(3, 3),)
            )
            (grad_x,) = torch.autograd.grad(
                y,
                inputs=(x,),
                grad_outputs=(grad_y,),
            )

        # 1 forward + 1 shared recompute inside a group, versus a fresh
        # recompute for each of the two backward calls without it.
        if use_graph_execution_group:
            self.assertEqual(counter[0], 2)
        else:
            self.assertEqual(counter[0], 3)

    run(use_graph_execution_group=True)
    run(use_graph_execution_group=False)

    # Test the not actually disjoint case (using retain_graph=True since
    # otherwise autograd itself will catch this)
    def fn(x):
        return x.sin().cos()

    x = torch.randn(3, 3, requires_grad=True)
    out = checkpoint(fn, x, use_reentrant=False)
    with torch.utils.checkpoint.GraphExecGroup():
        # Under this context, we will enforce that two backward are disjoint
        # even if retain_graph=True.
        out.sum().backward(retain_graph=True)

        with self.assertRaisesRegex(
            RuntimeError, "Performing two backward calls that overlap"
        ):
            out.sum().backward()
def test_checkpoint_detects_non_determinism(self):
    """A recompute that saves a different number/shape of tensors than the
    original forward must be detected (unless early-stop hides the extras),
    and debug=True must point the user at the debug message."""

    def save_3_tensors(x):
        out = x.sin().exp()
        out = out.sin()
        return out

    def save_2_tensors(x):
        return x.sin().exp()

    def save_2_tensors_alt(x):
        # Same number of saved tensors as save_2_tensors, different metadata.
        return x.sin() * torch.tensor([1.0, 2.0])

    def get_non_det_fn(orig_fn, recompute_fn):
        # Runs orig_fn on the first call and recompute_fn on later calls,
        # simulating a non-deterministic checkpointed function.
        counter = [0]

        def fn(x):
            if counter[0] == 0:
                counter[0] += 1
                return orig_fn(x)
            else:
                return recompute_fn(x)

        return fn

    a = torch.randn(1, requires_grad=True)

    # Save fewer tensors during recompute
    fn = get_non_det_fn(orig_fn=save_3_tensors, recompute_fn=save_2_tensors)
    with self.assertRaisesRegex(
        RuntimeError, "A different number of tensors was saved"
    ):
        out = checkpoint(fn, a, use_reentrant=False)
        out.backward()

    # Save more tensors during recompute
    fn = get_non_det_fn(orig_fn=save_2_tensors, recompute_fn=save_3_tensors)
    with torch.utils.checkpoint.set_checkpoint_early_stop(False):
        with self.assertRaisesRegex(
            RuntimeError, "trying to save more tensors during recomputation"
        ):
            out = checkpoint(fn, a, use_reentrant=False)
            out.backward()

    fn = get_non_det_fn(orig_fn=save_2_tensors, recompute_fn=save_3_tensors)
    # If early stopping is enabled, we would not raise (the results would be correct anyway)
    out = checkpoint(fn, a, use_reentrant=False)
    out.backward()

    # Save the same number of tensors but the shape is different
    fn = get_non_det_fn(orig_fn=save_2_tensors, recompute_fn=save_2_tensors_alt)
    with self.assertRaisesRegex(RuntimeError, "tensors have different metadata"):
        out = checkpoint(fn, a, use_reentrant=False)
        out.backward()

    # Get the debug message if debug=True
    fn = get_non_det_fn(orig_fn=save_2_tensors, recompute_fn=save_2_tensors_alt)
    with self.assertRaisesRegex(
        RuntimeError,
        "You are seeing this error because you passed `debug=True` to checkpoint",
    ):
        out = checkpoint(fn, a, use_reentrant=False, debug=True)
        out.backward()

    fn = get_non_det_fn(orig_fn=save_2_tensors, recompute_fn=save_2_tensors_alt)
    with self.assertRaisesRegex(
        RuntimeError,
        "You are seeing this error because you passed `debug=True` to checkpoint",
    ):
        # The global debug toggle overrides the per-call debug=False.
        with torch.utils.checkpoint.set_checkpoint_debug_enabled(True):
            out = checkpoint(fn, a, use_reentrant=False, debug=False)
            out.backward()

    fn = get_non_det_fn(orig_fn=save_2_tensors, recompute_fn=save_2_tensors_alt)
    with self.assertRaisesRegex(
        RuntimeError, "Recomputed values for the following tensors have different"
    ):
        # Global toggle False overrides the per-call debug=True.
        with torch.utils.checkpoint.set_checkpoint_debug_enabled(False):
            out = checkpoint(fn, a, use_reentrant=False, debug=True)
            out.backward()
def test_access_saved_tensor_twice_without_recomputation_works(self):
    """Each access of a checkpointed saved tensor outside backward triggers a
    fresh recompute; after backward the saved variable is freed for good."""
    count = [0]

    def foo(a):
        count[0] += 1  # counts how many times the forward actually runs
        b = a * a
        c = a * b
        d = torch.exp(a)
        return d

    a = torch.randn(5, requires_grad=True)
    d = checkpoint(foo, a, use_reentrant=False)
    self.assertEqual(count[0], 1)
    # Recomputed variables only persist within a particular backward call.
    # If _saved_result is accessed outside of a backward, it will trigger
    # a recompute. And afterwards, those recomputed results are immediately
    # cleared.
    d.grad_fn._saved_result
    self.assertEqual(count[0], 2)
    # Second access will trigger another recompute
    d.grad_fn._saved_result
    self.assertEqual(count[0], 3)
    # Backward clears the saved variable
    d.sum().backward()
    self.assertEqual(count[0], 4)
    # Now it raises an error
    with self.assertRaisesRegex(
        RuntimeError,
        "or directly access saved tensors after they have already been freed",
    ):
        d.grad_fn._saved_result
@slowTest
@parametrize("input_requires_grad", [True, False])
def test_checkpointing_without_reentrant(self, input_requires_grad):
    """
    Basic test for checkpoint without reentrant autograd: gradients must
    match the un-checkpointed baseline whether or not inputs require grad,
    and kwargs must be supported.
    """
    num_inp = 2000
    nz_inp = 10
    nz_out = 10  # NOTE(review): unused — mirrors test_checkpointing; confirm
    nz_bottleneck = 1000

    # small proxy network for some complex reasoning we want to do per input
    module = nn.Sequential(
        nn.Linear(nz_inp, nz_bottleneck),
        nn.ReLU(),
        nn.Linear(nz_bottleneck, nz_inp),
    )

    # Module holder for testing activation checkpointing with no_reentrant
    # supports kwargs.
    class MyModule(nn.Module):
        def __init__(self, mod):
            super().__init__()
            self.module = mod

        def forward(self, data):
            return self.module(data)

    module = MyModule(mod=module)

    # Run model with and without checkpointing and verify gradients are
    # equivalent, regardless of if inputs require grads or not.
    module_copy = deepcopy(module)

    feat_combined = []
    feat_combined_no_checkpoint = []
    for _ in range(num_inp):
        data_r = torch.empty(1, nz_inp)
        data_r.uniform_()
        data_r.requires_grad = input_requires_grad
        data_r_copy = data_r.clone()  # NOTE(review): unused — confirm intent
        feat_r = checkpoint(module, data=data_r, use_reentrant=False)
        feat_combined.append(feat_r)
        feat_r_no_checkpoint = module_copy(data_r)
        feat_combined_no_checkpoint.append(feat_r_no_checkpoint)

    # compute mean as a proxy for some joint reasoning
    mean_combined = torch.stack(feat_combined).mean()
    mean_combined.backward()
    mean_combined_no_checkpoint = torch.stack(feat_combined_no_checkpoint).mean()
    mean_combined_no_checkpoint.backward()

    for checkpoint_param, param in zip(
        module.parameters(), module_copy.parameters()
    ):
        self.assertEqual(checkpoint_param.grad, param.grad)
def test_checkpoint_valid_reset_on_error(self):
    """After autograd.grad fails under reentrant checkpoint, the engine's
    checkpoint-valid state must reset so a later backward succeeds."""
    inp = torch.randn(2, 2, requires_grad=True)

    # torch.autograd.grad is incompatible with reentrant checkpointing
    # and must raise.
    with self.assertRaisesRegex(
        Exception, "torch.utils.checkpoint is incompatible"
    ):
        bad = checkpoint(torch.exp, inp, use_reentrant=True).sum()
        torch.autograd.grad(bad, (inp,))

    # The failure above must not poison a subsequent checkpointed backward.
    ok = checkpoint(torch.exp, inp, use_reentrant=True).sum()
    ok.backward()
@parametrize("use_reentrant", [True, False])
def test_checkpointing_without_reentrant_detached_tensor(self, use_reentrant):
    """Checkpoint over a module whose output never requires grad: reentrant
    mode must error, non-reentrant mode must succeed."""

    class NoGradModule(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear = nn.Linear(2, 2, bias=False)
            self.lin2 = nn.Linear(2, 2, bias=False)

        def forward(self, x):
            # no_grad detaches the output from the autograd graph.
            with torch.no_grad():
                return self.lin2(self.linear(x))

    module = NoGradModule()

    err_ctx = (
        self.assertRaisesRegex(
            RuntimeError, "none of output has requires_grad=True"
        )
        if use_reentrant
        else contextlib.nullcontext()
    )

    a = torch.randn(2, 2, requires_grad=True)
    for _ in range(3):
        with err_ctx:
            # out does not require grad
            out = checkpoint(module, a, use_reentrant=use_reentrant)
            # Make loss require grad, otherwise we would run into
            # "element 0 of tensors does not require grad and does not have a grad_fn"
            out += a
            out.sum().backward()
def test_checkpointing_without_reentrant_saved_object_identity(self):
    """A saved tensor that does not require grad must be unpacked as the very
    same object, both with and without non-reentrant checkpointing."""
    x_backward = None

    class Test(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, y):
            ctx.save_for_backward(y)
            return x

        @staticmethod
        def backward(ctx, x):
            nonlocal x_backward
            (x_backward,) = ctx.saved_tensors
            return x, None

    a = torch.tensor(1.0, requires_grad=True)
    b = torch.tensor(1.0, requires_grad=False)

    # Baseline: without checkpointing, the exact object is returned.
    Test.apply(a, b).backward()
    self.assertIs(b, x_backward)

    x_backward = None
    checkpoint(Test.apply, a, b, use_reentrant=False).backward()
    self.assertIs(b, x_backward)
def test_checkpointing_without_reentrant_correct_grad(self):
    """
    Gradients from non-reentrant checkpoint must match the un-checkpointed
    baseline for both .backward() and torch.autograd.grad().
    """
    inp = torch.randn(2, 2, requires_grad=True)

    # Baseline: no checkpointing.
    baseline = torch.exp(inp).sum()
    baseline.backward()
    expected_grad = inp.grad

    inp.grad = None
    # Checkpointed, gradients via .backward().
    via_backward = checkpoint(torch.exp, inp, use_reentrant=False).sum()
    via_backward.backward()
    backward_grad = inp.grad

    inp.grad = None
    # Checkpointed, gradients via torch.autograd.grad().
    via_grad = checkpoint(torch.exp, inp, use_reentrant=False).sum()
    (grad_fn_grad,) = torch.autograd.grad(via_grad, (inp,))

    self.assertEqual(expected_grad, backward_grad)
    self.assertEqual(expected_grad, grad_fn_grad)
@skipIfXpu(msg="torch._C._scatter Not implemented on XPU, issue #143239")
def test_checkpointing_without_reentrant_dataparallel(self):
    """
    Verifies gradient correctness when checkpoint without reentrant autograd
    is used in conjunction with DataParallel (falls back to CPU when CUDA is
    unavailable).
    """

    class LinearModule(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear = nn.Linear(2, 2, bias=False)

        def forward(self, inp):
            return self.linear(inp)

    a = torch.randn(2, 2, requires_grad=True)
    if torch.cuda.is_available():
        a = a.cuda()

    model = LinearModule()
    if torch.cuda.is_available():
        model = model.cuda()

    # Baseline: plain forward/backward on a copy of the model.
    b = deepcopy(model)(a).sum()
    b.backward()
    b_grad = a.grad

    a.grad = None

    module = torch.nn.DataParallel(deepcopy(model))
    c = checkpoint(module, a, use_reentrant=False).sum()
    c.backward()
    c_grad = a.grad

    self.assertEqual(b_grad, c_grad)
def test_checkpointing_without_reentrant_parameter_used_in_an_out(self):
    """
    Ensures that gradient hooks are only called once per tensor, even when
    the tensor is used both inside and outside the checkpointed region.
    """
    w = torch.randn(10, 10, requires_grad=True)
    count = 0

    def hook(grad):
        nonlocal count
        count += 1

    w.register_hook(hook)
    x = torch.rand(10, 10, requires_grad=True)
    h = w * x  # Using w outside the checkpoint
    out = checkpoint(
        lambda x: w * x, h, use_reentrant=False
    )  # Using w inside the checkpoint

    out.sum().backward()
    # should only call hook once
    self.assertEqual(count, 1)
def test_checkpointing_without_reentrant_arbitrary_input_output(self):
    """
    Ensures checkpointing without reentrant autograd works with functions
    with arbitrary input/output structures (here: dict in, dict out).
    """

    class MyModel(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.layer = torch.nn.Linear(5, 5, bias=False)

        def forward(self, dict_input):
            tensor = dict_input["tensor"]
            return {"result": self.layer(tensor)}

    model_no_checkpoint = MyModel()
    model_checkpoint_without_reentrant = deepcopy(model_no_checkpoint)

    inp = {"tensor": torch.randn(5, 5)}

    out_no_checkpoint = model_no_checkpoint(inp)["result"].sum()

    out_checkpoint = checkpoint(
        model_checkpoint_without_reentrant, inp, use_reentrant=False
    )["result"].sum()

    self.assertEqual(out_checkpoint, out_no_checkpoint)

    out_no_checkpoint.backward()
    out_checkpoint.backward()

    # Gradients must match parameter-by-parameter across the two models.
    for param, checkpoint_param in zip(
        model_no_checkpoint.parameters(),
        model_checkpoint_without_reentrant.parameters(),
    ):
        self.assertEqual(param.grad, checkpoint_param.grad)
def test_callback_adds_callback(self):
    """A callback queued from within another engine callback must also run
    before backward returns."""
    called = [0]

    def callback_final():
        called[0] += 1

    def callback_adds_callback():
        called[0] += 1
        Variable._execution_engine.queue_callback(callback_final)

    class MyFunc(Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        @once_differentiable
        def backward(ctx, grad):
            # Queue the first callback from inside a backward node.
            Variable._execution_engine.queue_callback(callback_adds_callback)
            return grad

    a = torch.rand((3, 3), requires_grad=True)
    b = MyFunc.apply(a)
    b.sum().backward()

    self.assertEqual(called[0], 2)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_callback_propagates_errors_from_device_thread(self):
    """An exception raised by an engine callback on a CUDA worker thread must
    surface in the thread that called backward()."""

    def callback():
        raise RuntimeError("blah")

    def hook_with_callback(*args):
        torch.autograd.Variable._execution_engine.queue_callback(callback)

    t = torch.tensor([1.0, 2.0], requires_grad=True, device=torch.device("cuda"))
    t.register_hook(hook_with_callback)

    output = t**2
    loss = output.sum()
    with self.assertRaisesRegex(RuntimeError, "blah"):
        loss.backward()
def _test_reentrant_with_callbacks(self, install_callbacks_in_depths):
    """Helper: run a reentrant backward with engine callbacks installed at
    the given nesting depths (0 = outer graph task, 1 = inner reentrant
    task) and return a dict of per-depth invocation counts.
    """
    counter = {}
    counter["inner"] = 0
    counter["outer"] = 0

    def inc_inner_counter():
        counter["inner"] += 1

    def inc_outer_counter():
        counter["outer"] += 1

    class MyFunc(Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        @once_differentiable
        def backward(ctx, input):
            if 1 in install_callbacks_in_depths:
                # Add a callback to execute.
                Variable._execution_engine.queue_callback(inc_inner_counter)
            return input

    class MyReentrantFunc(Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        @once_differentiable
        def backward(ctx, input):
            if 0 in install_callbacks_in_depths:
                # Add a callback to execute.
                Variable._execution_engine.queue_callback(inc_outer_counter)
            # Reentrant backward call.
            tmp_inp = input.detach().requires_grad_()
            with torch.enable_grad():
                tmp_out = (MyFunc.apply(tmp_inp)).sum()
            tmp_out.backward()
            return input

    t1 = torch.rand((3, 3), requires_grad=True)
    t2 = MyReentrantFunc.apply(t1)
    t3 = t2.sum()
    torch.autograd.backward([t3])

    return counter
def test_reentrant_with_callbacks_depth_0(self):
    """A callback installed at the outer depth fires exactly once."""
    counts = self._test_reentrant_with_callbacks([0])
    self.assertEqual(counts["outer"], 1)
    self.assertEqual(counts["inner"], 0)
def test_reentrant_with_callbacks_depth_1(self):
    """A callback installed at the inner (reentrant) depth fires exactly once."""
    counts = self._test_reentrant_with_callbacks([1])
    self.assertEqual(counts["outer"], 0)
    self.assertEqual(counts["inner"], 1)
def test_reentrant_with_callbacks_both_depths(self):
    """Callbacks installed at both depths fire once each (two total)."""
    counts = self._test_reentrant_with_callbacks([0, 1])
    self.assertEqual(counts["outer"], 1)
    self.assertEqual(counts["inner"], 1)
def test_reentrant_with_leaf_variable_hook(self):
    """A leaf-tensor grad hook may itself run a reentrant backward; the
    leaf's original .grad must be restored before the hook returns."""
    handle = None
    param = torch.rand(10, requires_grad=True)

    def add_gradient_penalty_to_grad(grad):
        handle.remove()  # one-shot hook: detach before the nested backward
        old_param_grad = grad
        param.grad = None
        # Add some sort of gradient penalty by directly updating the gradients
        with torch.enable_grad():
            g = grad.detach().requires_grad_()
            new_param = param.detach().requires_grad_()
            out = ((g * 2) + new_param).sum()
            out.backward()
        res = g.grad + grad
        param.grad = old_param_grad
        return res

    handle = param.register_hook(add_gradient_penalty_to_grad)
    # Forward pass
    tmp = param * param
    loss = tmp.sum()
    # Compute the gradients
    loss.backward()
def test_reentrant_with_non_leaf_variable_hook(self):
    """A non-leaf grad hook may run a reentrant backward and return a
    modified gradient; the final leaf grad reflects the modification."""
    handle = None
    param = torch.rand(10, requires_grad=True)

    def manual_increase_gradient(grad):
        handle.remove()  # one-shot hook
        # Add some sort of gradient penalty by directly updating the gradients
        with torch.enable_grad():
            g = grad.detach().requires_grad_()
            out = ((g * 2) + 5).sum()
            out.backward()
        # g.grad is 2 everywhere, so the hook returns 3x the incoming grad.
        res = g.grad + grad
        return res

    # Forward pass
    tmp = param * param
    handle = tmp.register_hook(manual_increase_gradient)
    loss = tmp.sum()
    # Compute the gradients
    loss.backward()
    # d(loss)/d(param) = 2*param; tripled by the hook -> 6*param.
    self.assertEqual(param.grad, 6 * param)
def test_grad_fn_attr_bindings(self):
    """Smoke-test the codegen'd Python getters on grad_fn (``_saved_*`` and
    ``_raw_saved_*``) across the C++ field types they originate from."""
    # Check that the getter of each type returns what we want
    # See `gen_autograd_functions.py` for how the getters are generated
    #
    # This test is only meant to check if the codegen'd bindings work
    # Please help update this test if you update the names of any the fields we check!
    #
    a = torch.ones(1, requires_grad=True)
    b = torch.zeros(1, requires_grad=True)
    out1 = torch.stack([a, b], dim=0)
    out2 = (a * 2) * b
    # TODO: I don't think we have a backward saving a list of tensors
    # at the moment. It used to be stack, but for no reason...
    # see discussion in #84993
    # self.assertEqual(out.grad_fn._saved_tensors, (a, b)) # TewnsorList -> Tuple[Tensor]
    self.assertEqual(out2.grad_fn._saved_self, a * 2)
    self.assertIsInstance(out2.grad_fn._saved_self, torch.Tensor)
    self.assertIsInstance(
        out2.grad_fn._raw_saved_self, torch._C._autograd.SavedTensor
    )
    self.assertEqual(out1.grad_fn._saved_dim, 0)  # int64_t -> int
    self.assertIsInstance(out1.grad_fn._saved_dim, int)

    out2.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)

    out2.sum().backward()
    with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
        out2.grad_fn._saved_self
    # TODO: interestingly, this only happens if indexing into a list grad_fn._raw_saved_tensors[0],
    # not when using a saved tensor, see discussion in #84993
    # with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
    #     out2.grad_fn._raw_saved_self
    self.assertEqual(out1.grad_fn._saved_dim, 0)

    a = torch.ones(2, 2, requires_grad=True)
    indices = torch.tensor([0, 1])
    out = a[:, indices]
    self.assertEqual(
        out.grad_fn._saved_indices, (None, indices)
    )  # c10::List<std::optional<Tensor>> -> Tuple[Tensor?]
    self.assertIsInstance(out.grad_fn._saved_indices[1], torch.Tensor)
    self.assertIsInstance(
        out.grad_fn._raw_saved_indices[1], torch._C._autograd.SavedTensor
    )
    self.assertEqual(
        out.grad_fn._saved_self_sym_sizes, a.shape
    )  # SymIntArrayRef -> Tuple[SymInt]
    self.assertIsInstance(out.grad_fn._saved_self_sym_sizes[0], int)

    out.grad_fn._raw_saved_indices[1].register_hooks(lambda x: x, lambda x: x)
    with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
        out.grad_fn._raw_saved_indices[0].register_hooks(lambda x: x, lambda x: x)

    out = a.mean()
    self.assertEqual(
        out.grad_fn._saved_self_sym_sizes, a.shape
    )  # IntArrayRef -> Tuple[int]

    a = torch.ones(2, 2, requires_grad=True)
    out = a * a
    out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
    out.sum().backward()
    with self.assertRaisesRegex(RuntimeError, "after it has been freed"):
        out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)

    a = torch.ones(1, 1, 2, requires_grad=True)
    out = torch.nn.functional.interpolate(a, 4, mode="linear")
    self.assertEqual(
        out.grad_fn._saved_output_size, (4,)
    )  # std::optional<IntArrayRef> -> int[]?
    self.assertIsInstance(out.grad_fn._saved_output_size[0], int)
    self.assertEqual(out.grad_fn._saved_align_corners, False)  # bool -> bool
    self.assertIsInstance(out.grad_fn._saved_align_corners, bool)
    # Field name differs across versions; accept either spelling.
    if hasattr(out.grad_fn, "_saved_scale_factors"):
        self.assertIsNone(
            out.grad_fn._saved_scale_factors
        )  # std::optional<ArrayRef<double>> -> float[]?
    else:
        self.assertIsNone(
            out.grad_fn._saved_scales
        )  # std::optional<ArrayRef<double>> -> float[]?

    a = torch.ones(1, 1, 3, 3, requires_grad=True)
    out = nn.Conv2d(1, 1, 3)(a)
    self.assertEqual(
        out.grad_fn._saved_bias_sym_sizes_opt, (1,)
    )  # std::optional<SymIntArrayRef> -> SymInt[]?
    out = nn.Conv2d(1, 1, 3, bias=False)(a)
    # TODO: This is BAD! we converted a std::nullopt into a (0,)
    self.assertEqual(out.grad_fn._saved_bias_sym_sizes_opt, (0,))

    a = torch.ones(1, 3, 3, requires_grad=True)
    out = torch.addbmm(a.squeeze(0), a, a)
    self.assertEqual(out.grad_fn._saved_batch1_sym_argsize_0, 1)  # int64_t
    self.assertEqual(out.grad_fn._saved_batch1_sym_argsize_1, 3)  # int64_t

    a = torch.ones(1, 1, 3, 3, requires_grad=True)
    out = torch.nn.functional.unfold(a, 3)
    self.assertEqual(out.grad_fn._saved_self_sym_argsize_minus_2, 3)  # SymInt
    self.assertEqual(out.grad_fn._saved_self_sym_argsize_minus_1, 3)  # SymInt

    a = torch.ones(1, 1, 2, requires_grad=True)
    out = torch.nn.functional.interpolate(a, scale_factor=0.5, mode="linear")
    self.assertEqual(out.grad_fn._saved_scales, 0.5)

    a = torch.ones(2, 2, requires_grad=True)
    out = torch.pdist(a, p=1)
    self.assertEqual(out.grad_fn._saved_p, 1.0)  # double -> float
    self.assertIsInstance(out.grad_fn._saved_p, float)

    a = torch.ones(1, 1, 2, requires_grad=True)
    out = torch.logit(a, 1.0)
    self.assertEqual(out.grad_fn._saved_eps, 1.0)  # c10:optional<double> -> float?
    self.assertIsInstance(out.grad_fn._saved_eps, float)
    out = torch.logit(a)
    self.assertIsNone(out.grad_fn._saved_eps)

    if torch._C.has_lapack:
        a = torch.ones(1, 1, requires_grad=True)
        q, r = torch.linalg.qr(a, mode="reduced")
        self.assertEqual(q.grad_fn._saved_mode, "reduced")  # std::string -> str

    a = torch.tensor([1.0], requires_grad=True)
    out = torch.div(a, 2.0, rounding_mode="trunc")
    self.assertEqual(
        out.grad_fn._saved_rounding_mode, "trunc"
    )  # std::optional<std::string> -> str?
    out = torch.div(a, 2.0, rounding_mode=None)
    self.assertIsNone(
        out.grad_fn._saved_rounding_mode
    )  # std::optional<std::string> -> str?

    x = torch.zeros(5, requires_grad=True)
    out = torch.threshold(x, threshold=(1 + 0j), value=(1 + 0j))
    self.assertIsInstance(
        out.grad_fn._saved_threshold, complex
    )  # Scalar(complex double) -> complex
    cfloat = torch.tensor(1 + 0j, dtype=torch.complex64)
    out = torch.threshold(x, threshold=cfloat, value=(1 + 0j))
    self.assertIsInstance(
        out.grad_fn._saved_threshold, complex
    )  # Scalar(complex float) -> complex
    out = torch.threshold(x, threshold=1.0, value=1.0)
    self.assertIsInstance(
        out.grad_fn._saved_threshold, float
    )  # Scalar(floating point) -> float
    out = torch.threshold(x, threshold=1, value=1)
    self.assertIsInstance(
        out.grad_fn._saved_threshold, int
    )  # Scalar(integral) -> int
    out = torch.threshold(x, threshold=False, value=False)
    self.assertIsInstance(
        out.grad_fn._saved_threshold, bool
    )  # Scalar(bool) -> bool

    a = torch.ones(2, 2, requires_grad=True)
    out = a.as_strided((3,), (1,), 1)
    self.assertEqual(
        out.grad_fn._saved_storage_offset, 1
    )  # c10:optional<int64_t> -> int?
    self.assertIsInstance(out.grad_fn._saved_storage_offset, int)
    out = a.as_strided((3,), (1,))
    self.assertIsNone(out.grad_fn._saved_storage_offset)

    a = torch.ones(2, requires_grad=True)
    out = torch.tanh(a)
    self.assertEqual(out, out.grad_fn._saved_result)  # saved variable when output

    a = torch.randn(3, 5, requires_grad=True)
    b = torch.tensor([1, 0, 4])
    loss = nn.NLLLoss()
    out = loss(a, b)
    self.assertIsNone(out.grad_fn._saved_weight)
    loss = nn.NLLLoss(weight=torch.ones((5,)))
    out = loss(a, b)
    self.assertEqual(
        out.grad_fn._saved_weight, torch.ones((5,))
    )  # c10:optional<Tensor> -> Tensor?

    out.sum().backward()
    with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
        out.grad_fn._saved_weight

    num_tensors = 3
    input_tensors = [
        torch.ones(2, 2, requires_grad=True) for _ in range(num_tensors)
    ]
    scalars = [
        0.0 for _ in range(num_tensors)
    ]  # ArrayRef<Scalar> -> Tuple[Scalar, ...]
    results = torch._foreach_maximum(input_tensors, scalars)
    for t in results:
        self.assertEqual(t.grad_fn._saved_scalars, scalars)
def test_get_data_and_hooks_from_raw_saved_variable(self):
    """``_raw_saved_*`` exposes the packed data and unpack hook of a
    SavedVariable; detachment depends on whether an input or the output
    was saved, and on whether saved_tensors_hooks were active."""

    def pack_hook(t):
        return t

    def unpack_hook(t):
        return t

    a = torch.tensor(2.0, requires_grad=True)
    # Only `b` is created while the hooks are active; `c` and `d` are not
    # (their SavedVariables have no unpack hook, as asserted below).
    with torch.autograd.graph.saved_tensors_hooks(pack_hook, unpack_hook):
        b = a**2
    c = b.exp()
    d = c**2

    pow_sv = b.grad_fn._raw_saved_self
    exp_sv = c.grad_fn._raw_saved_result
    pow2_sv = d.grad_fn._raw_saved_self

    # Returns the packed object as-is
    self.assertTrue(pow_sv.data is a)
    self.assertTrue(pow_sv.unpack_hook is unpack_hook)

    # Returns the detached data when the output/leaf is saved
    self.assertFalse(exp_sv.data is c)
    self.assertIsNone(exp_sv.unpack_hook)

    # Returns the un-detached data when input is saved
    self.assertTrue(pow2_sv.data is c)
    self.assertIsNone(pow2_sv.unpack_hook)
def test_cant_create_saved_tensors(self):
    """Directly instantiating SavedTensor from Python must be rejected."""
    expected_msg = "Trying to create a SavedTensor object from Python is forbidden"
    with self.assertRaisesRegex(RuntimeError, expected_msg):
        torch.autograd.SavedTensor()
def test_custom_function_saved_tensors(self):
    """Exercises saved_tensors/_raw_saved_tensors bindings on a custom
    Function: None entries, hook-registration errors, and access after
    the saved variables are freed by backward."""

    def getFn(save=True):
        class MyFn(Function):
            @staticmethod
            def forward(ctx, x):
                if save:
                    ctx.save_for_backward(x, None)
                return x

            @staticmethod
            def backward(ctx, g):
                return g

        return MyFn

    a = torch.randn(5, requires_grad=True)

    y = getFn(True).apply(a)
    self.assertEqual((a, None), y.grad_fn.saved_tensors)
    saved = y.grad_fn._raw_saved_tensors
    self.assertIsInstance(saved[0], torch._C._autograd.SavedTensor)
    # We can't tell the underlying tensor is None without unpacking it
    self.assertIsInstance(saved[1], torch._C._autograd.SavedTensor)
    # We catch that error when the user calls register_hooks on it
    with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
        saved[1].register_hooks(lambda x: x, lambda x: x)

    # register_hooks requires exactly (pack, unpack) callables.
    with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
        saved[0].register_hooks(lambda x: x)
    with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
        saved[0].register_hooks(1, 1)
    saved[0].register_hooks(lambda x: x, lambda x: x)
    with self.assertRaisesRegex(RuntimeError, "already been set"):
        saved[0].register_hooks(lambda x: x, lambda x: x)
    y.sum().backward()

    # Using a reference to the SavedTensor object after the
    # saved variables have been released can lead to undefined behavior
    del saved
    with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
        y.grad_fn._raw_saved_tensors
    with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
        y.grad_fn.saved_tensors

    y = getFn(False).apply(a)
    self.assertEqual(y.grad_fn.saved_tensors, ())
    self.assertEqual(y.grad_fn._raw_saved_tensors, ())
def test_autograd_node_isinstance(self):
    """Check isinstance/issubclass behavior of torch.autograd.graph.Node.

    Node is a "virtual" base class for every kind of autograd node
    (codegen'd, manually registered, and custom-Function ctx objects).
    """
    # Node is a "virtual" base class of codegen'd nodes. This means that
    # isinstance and issubclass are overridden, but mro is unchanged
    Node = torch.autograd.graph.Node

    a = torch.rand(3, 3, requires_grad=True)
    b = a.exp()

    # Some nodes have codegened registrations to the torch._C._function module
    self.assertIsInstance(b.grad_fn, Node)
    self.assertTrue(issubclass(type(b.grad_fn), Node))
    self.assertTrue(Node not in type(b.grad_fn).mro())

    # Other nodes have manual registrations to the torch._C._function module
    self.assertNotIsInstance(torch._C._functions.AccumulateGrad, Node)
    self.assertTrue(issubclass(torch._C._functions.AccumulateGrad, Node))
    self.assertIsInstance(b.grad_fn.next_functions[0][0], Node)
    self.assertTrue(issubclass(torch._C._functions.DelayedError, Node))

    # Special cases
    self.assertNotIsInstance(None, Node)
    self.assertNotIsInstance(1, Node)
    self.assertNotIsInstance(Node, Node)
    self.assertTrue(issubclass(Node, Node))

    # Custom function case
    self.assertTrue(issubclass(torch.autograd.function.BackwardCFunction, Node))

    class Func(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            # ctx itself must also be recognized as a Node.
            self.assertIsInstance(ctx, Node)
            return x

        @staticmethod
        def backward(ctx, x):
            self.assertIsInstance(ctx, Node)
            return x

    out = Func.apply(a)
    self.assertIsInstance(out.grad_fn, Node)
    self.assertTrue(issubclass(type(out.grad_fn), Node))
    self.assertTrue(Node not in type(out.grad_fn).mro())
    out.sum().backward()
def test_autograd_views_codegen(self):
    """Pin view-tracking + inplace behavior of two codegen ops (view_as, unbind).

    Each run_test case varies grad mode and requires_grad, and asserts
    whether the outputs are differentiable views and whether inplace
    modification of them raises.
    """
    # This is not necessarily the absolute correct behavior, but this is the current
    # one. This test is here to make sure that any change to this behavior is detected
    # and not silent. The TODOs below mark the places with unexpected behavior.
    # Note that any change in these test will be BC-breaking and should be done carefully.

    # This test checks the behavior of two codegen functions (view_as and unbind)
    # with respect to view tracking and inplace operation on the output.
    def run_test(grad_mode, requires_grad, is_view, should_raise_tuple):
        def maybe_check_raise(fn, should_raise):
            self.assertTrue(should_raise is None or isinstance(should_raise, str))
            if should_raise is not None:
                with self.assertRaisesRegex(RuntimeError, should_raise):
                    fn()
            else:
                fn()

        inp = torch.rand(2, requires_grad=requires_grad).clone()
        with torch.set_grad_enabled(grad_mode):
            out = inp.view_as(inp)
        # Are they differentiable views?
        self.assertTrue(out._is_view() == is_view)
        # Are inplace allowed?
        maybe_check_raise(lambda: out.add_(1), should_raise_tuple[0])

        inp = torch.rand(2, requires_grad=requires_grad).clone()
        with torch.set_grad_enabled(grad_mode):
            out = inp.unbind()
        # Are they differentiable views?
        self.assertTrue(out[0]._is_view() == is_view)
        self.assertTrue(out[1]._is_view() == is_view)
        # Are inplace allowed?
        maybe_check_raise(lambda: out[0].add_(1), should_raise_tuple[1])
        maybe_check_raise(lambda: out[1].add_(1), should_raise_tuple[2])

    # should_raise contains None if it should not raise
    # should_raise contains a string of the error if it should raise
    # The 3 elements are for view_as, first output of unbind and second output of unbind
    run_test(
        grad_mode=True,
        requires_grad=False,
        is_view=True,
        should_raise_tuple=(None, None, None),
    )
    inp_change_err = (
        "Output {} of UnbindBackward0 is a view and is being modified inplace."
    )
    run_test(
        grad_mode=True,
        requires_grad=True,
        is_view=True,
        should_raise_tuple=(
            None,
            inp_change_err.format("0"),
            inp_change_err.format("1"),
        ),
    )
    leaf_grad_err = (
        "A view was created in no_grad mode and is being modified inplace"
    )
    run_test(
        grad_mode=False,
        requires_grad=True,
        is_view=True,
        should_raise_tuple=(leaf_grad_err, leaf_grad_err, leaf_grad_err),
    )
    run_test(
        grad_mode=False,
        requires_grad=False,
        is_view=True,
        should_raise_tuple=(None, None, None),
    )
def test_inplace_not_requires_grad(self):
    """Inplace writes through invalid views must raise even if the base doesn't require grad.

    Invalid views covered: one returned by a custom Function (both `+=` and
    the manually implemented `copy_`), and one output of a multi-view op
    (unbind). A plain `select` view is checked to still work.
    """

    class MyFn(torch.autograd.Function):
        @staticmethod
        def forward(ctx, inp):
            return inp.view_as(inp)

        @staticmethod
        def backward(ctx, grad):
            return grad

    # Original Tensor does not require grad
    a = torch.rand(1, 2)

    # Tensor being written does require grad
    b = torch.rand(1, requires_grad=True)

    # Take an invalid view on 'a' that should raise an error (warns during deprecation)
    view_a = MyFn.apply(a)

    with self.assertRaisesRegex(
        RuntimeError, "This view was created inside a custom Function"
    ):
        view_a += b

    # Extra test for copy_ that is a manual implementation and could be easily
    # forgotten when the codegen is updated (warns during deprecation)
    a = torch.rand(1, 2)
    b = torch.rand(1, requires_grad=True)
    view_a = MyFn.apply(a)

    with self.assertRaisesRegex(
        RuntimeError, "This view was created inside a custom Function"
    ):
        view_a.copy_(b)

    # Functions that should throw must properly throw
    a = torch.rand(1, 2)
    b = torch.rand(1, requires_grad=True)
    view_a = a.unbind()[0]
    with self.assertRaisesRegex(
        RuntimeError,
        "This view is the output of a function that returns multiple views.",
    ):
        view_a.copy_(b)

    # Sanity check that views that should work still work
    a = torch.rand(1, 2)
    b = torch.rand(1, requires_grad=True)
    a.select(1, 0).copy_(b)
def _do_test_autograd_simple_views_python(self, dtype):
    """Pin current autograd.Function behavior when outputs are inputs or views.

    Exercises one-output, two-output and view-of-temporary custom Functions
    across an inplace/out-of-place follow-up, view/clone forwards, and the
    pure_view flag; checks gradcheck results, which error is raised, and
    whether the custom backward actually ran.
    """
    # This is not necessarily the absolute correct behavior, but this is the current
    # one. This test is here to make sure that any change to this behavior is detected
    # and not silent. The TODOs below mark the places with unexpected behavior.
    # Note that any change in these test will be BC-breaking and should be done carefully.

    # This checks the autograd.Function behavior when we return one or multiple outputs
    # while one of these is an input, a view of an input or of a temporary tensor.

    # This indicator is used to track how many times the backward function was called
    bw_called = [0]
    # This indicator is used to check if the argument `ga` contains non-zero values
    ga_nz = [False]

    class IdOneOutput(Function):
        @staticmethod
        def forward(ctx, a, make_view, pure_view):
            ctx._is_pure_view = pure_view
            if make_view:
                a = a.narrow(0, 0, 2)
            else:
                a = a.clone()
            return a

        @staticmethod
        def backward(ctx, ga):
            bw_called[0] += 1
            return ga, None, None

    class IdTwoOutput(Function):
        @staticmethod
        def forward(ctx, a, b, make_view, pure_view):
            ctx._is_pure_view = pure_view
            if make_view:
                a = a.narrow(0, 0, 2)
            else:
                a = a.clone()
            return a, a + b

        @staticmethod
        def backward(ctx, ga, gab):
            bw_called[0] += 1
            if ga.eq(0).all():
                ga_nz[0] = False
            else:
                ga_nz[0] = True
            return ga + gab, gab, None, None

    class ViewOfTemp(Function):
        @staticmethod
        def forward(ctx, a, make_view, pure_view):
            ctx._is_pure_view = pure_view
            ctx.save_for_backward(a)
            if make_view:
                a = a.narrow(0, 0, 2)
            else:
                a = a.clone()
            b = a.clone()
            return b.select(0, 0)

        @staticmethod
        def backward(ctx, grad):
            bw_called[0] += 1
            (a,) = ctx.saved_tensors
            res = torch.zeros_like(a)
            res.select(0, 0).copy_(grad)
            return res, None, None

    # NOTE(review): the "one_output" entry is a single (implicitly concatenated)
    # string, so indexing it with int(pure_view) below yields one *character*
    # as the regex, unlike the two-element tuples of the other entries —
    # confirm whether this should also be a tuple.
    fn_id_to_inplace_on_view_err_msg = {
        "one_output": (
            "Output 0 of IdOneOutputBackward is a view and is being "
            "modified inplace. This view was created inside a custom Function"
        ),
        "two_output": (
            "Output 0 of IdTwoOutputBackward is a view and is being modified inplace."
            " This view is the output of a function that returns multiple views.",
            "Pure view custom Function can only have one input Tensor and one output Tensor."
            " Open an issue if you need to support more.",
        ),
        "view_of_temp": (
            "Output 0 of ViewOfTempBackward is a view and is being "
            "modified inplace. This view was created inside a custom Function",
            "a view of a leaf Variable that requires grad is being used in an in-place operation",
        ),
    }

    for fn_id in ["one_output", "two_output", "view_of_temp"]:
        for inplace in [True, False]:
            for make_view in [True, False]:
                for pure_view in [True, False]:
                    # Used for special casing the tests below
                    output_is_a_view = make_view or fn_id == "view_of_temp"

                    def fn(a, b):
                        # never modify a, b inplace for gracheck
                        a = a.clone()
                        b = b.clone()
                        if fn_id == "two_output":
                            tmp1, tmp2 = IdTwoOutput.apply(
                                a, b, make_view, pure_view
                            )
                            if inplace:
                                tmp1 += 3
                                tmp2 += 3
                            else:
                                tmp1 = tmp1 + 3
                                tmp2 = tmp2 + 3
                            tmp = tmp1 * tmp2
                        else:
                            if fn_id == "one_output":
                                tmp = IdOneOutput.apply(a, make_view, pure_view)
                            else:
                                tmp = ViewOfTemp.apply(a + b, make_view, pure_view)
                            if inplace:
                                tmp += 3
                            else:
                                tmp = tmp + 3
                        return tmp.sum()

                    a = torch.ones(2, dtype=dtype, requires_grad=True)
                    b = torch.ones(2, dtype=dtype, requires_grad=True)

                    err_msg = fn_id_to_inplace_on_view_err_msg[fn_id][
                        int(pure_view)
                    ]

                    will_raise_error = (
                        (pure_view and fn_id == "two_output")
                        or (pure_view and fn_id == "view_of_temp" and inplace)
                        or (not pure_view and inplace and output_is_a_view)
                    )

                    if will_raise_error:
                        with self.assertRaisesRegex(RuntimeError, err_msg):
                            gradcheck(fn, (a, b), check_batched_grad=False)
                    else:
                        gradcheck(fn, (a, b), check_batched_grad=False)

                    # Was the custom backward called properly
                    bw_called[0] = 0
                    ga_nz[0] = True  # For the case where the backward is called
                    expected_called = 1
                    expected_ga_nz = True

                    if will_raise_error:
                        expected_called = 0
                        with self.assertRaisesRegex(RuntimeError, err_msg):
                            fn(a, b)
                    else:
                        fn(a, b).abs().backward()

                    if (
                        fn_id == "one_output"
                        and inplace
                        and output_is_a_view
                        and pure_view
                    ):
                        # We expect the op to have been replayed and we leveraged the pure view
                        # to re-create the graph, so the original backward was not called
                        expected_called = 0

                    self.assertTrue(bw_called[0] == expected_called)
                    self.assertTrue(ga_nz[0] == expected_ga_nz)
def test_autograd_simple_views_python(self):
    """Run the simple-views custom Function checks for real and complex dtypes."""
    for dtype in (torch.double, torch.cdouble):
        self._do_test_autograd_simple_views_python(dtype)
def test_autograd_inplace_views_creation_meta(self):
    """Check creation_meta propagation through inplace-view ops.

    For every inplace-view op (transpose_, t_, squeeze_, ...), a view is
    created under each combination of (view kind, grad mode at view
    creation, grad mode at inplace-view time, requires_grad), and the
    expected error — none, no-grad, multi-view, or custom-Function — is
    asserted both for the inplace-view call and for a later inplace write.
    """
    # Tests creation_meta properly handled for inplace views

    class Func(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x.view_as(x)

        @staticmethod
        def backward(ctx, x):
            return x

    view_custom = Func.apply

    def run_test(
        fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2
    ):
        # This test checks the behavior of inplace-view functions when
        # the views are created in grad mode or not
        base = torch.rand(2, 3, requires_grad=requires_grad).clone()
        # 1. Create a view with `grad_mode=grad_mode_view`
        with torch.set_grad_enabled(grad_mode_view):
            if fn_type == "multi_view":
                inp = base.unbind()[0]
            elif fn_type == "custom":
                inp = view_custom(base)
            else:
                inp = base.view_as(base)

        # 2. Perform inplace view with `grad_mode=grad_mode_iview`
        with torch.set_grad_enabled(grad_mode_iview):
            if error1 is not None:
                with self.assertRaisesRegex(RuntimeError, error1):
                    fn(inp)
                return
            else:
                # If error is None, check that runs without error
                fn(inp)
        # 3. Do inplace on the (new) view
        if error2 is not None:
            with self.assertRaisesRegex(RuntimeError, error2):
                inp.add_(1)
        else:
            # If error is None, check that runs without error
            inp.add_(1)

    no_grad_err = "A view was created in no_grad mode"
    multi_view_err = "function that returns multiple views"
    custom_err = "view was created inside a custom Function"

    def run_tests(fn):
        for fn_type in ("normal", "multi_view", "custom"):
            for grad_mode_view in (True, False):
                for grad_mode_iview in (True, False):
                    for requires_grad in (True, False):
                        error1 = None  # expected error when we do inplace_view on original view
                        error2 = None  # expected error when we do inplace on the resulting view

                        if requires_grad:
                            if not grad_mode_view and grad_mode_iview:
                                error1 = no_grad_err
                            if not grad_mode_view and not grad_mode_iview:
                                error2 = no_grad_err

                            if fn_type == "multi_view":
                                if grad_mode_view and grad_mode_iview:
                                    error1 = multi_view_err
                                if grad_mode_view and not grad_mode_iview:
                                    error2 = multi_view_err

                            if fn_type == "custom":
                                if grad_mode_view and grad_mode_iview:
                                    error1 = custom_err
                                if grad_mode_view and not grad_mode_iview:
                                    error2 = custom_err

                        run_test(
                            fn,
                            fn_type,
                            grad_mode_view,
                            grad_mode_iview,
                            requires_grad,
                            error1,
                            error2,
                        )

    # This list was created by logging gen_inplace_or_view_type.py
    # detach_ is excluded for this test because it cannot be applied to
    # views and thus does not return a view
    run_tests(lambda v: v.as_strided_((1, 0), (2, 2)))
    run_tests(lambda v: v.transpose_(0, 0))
    run_tests(lambda v: v.t_())
    run_tests(lambda v: v.squeeze_(0))
    run_tests(lambda v: v.unsqueeze_(0))
    run_tests(lambda v: v.swapdims_(0, 0))
    run_tests(lambda v: v.swapaxes_(0, 0))
def test_autograd_print_tensor(self):
    """repr() must surface grad_fn information, including the <Invalid> marker."""
    a = torch.ones(1, requires_grad=True)
    a_clone = a.clone()
    self.assertEqual(repr(a), "tensor([1.], requires_grad=True)")
    self.assertEqual(repr(a_clone), "tensor([1.], grad_fn=<CloneBackward0>)")

    with torch.no_grad():
        b = a[:]
        b *= 2

    # Special handling for printing view created in no-grad and modified
    # in-placed in no-grad.
    self.assertEqual(repr(b), "tensor([2.], grad_fn=<Invalid>)")

    class Func(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        def backward(ctx, x):
            return x

    # `b *= 2` above wrote through the view, so `a`'s data is now 2.
    c = Func.apply(a)
    self.assertEqual(repr(c), "tensor([2.], grad_fn=<FuncBackward>)")
def test_autograd_inplace_view_of_view(self):
    """Inplace on a view of a view created in no_grad / inference_mode must raise."""
    x = torch.zeros(2)
    with torch.no_grad():
        y = x.view(2)
    y.requires_grad_(True)
    z = y.view(2)
    with self.assertRaisesRegex(
        RuntimeError, "a view of a view .* is being .* inside the no_grad block"
    ):
        z /= 2

    # Same scenario, but the first view is created under inference_mode.
    x = torch.zeros(2)
    with torch.inference_mode():
        y = x.view(2)
    y.requires_grad_(True)
    z = y.view(2)
    with self.assertRaisesRegex(
        RuntimeError, "a view of a view .* is being .* inside the inference_mode"
    ):
        z /= 2
# TODO This is not the correct behavior -
# See https://github.com/pytorch/pytorch/issues/49825#issuecomment-794466627
def test_autograd_inplace_views_cross_dtype(self):
    """Pin the (currently wrong, see TODO above) gradient of an inplace transpose
    taken through a view_as_real cross-dtype view."""
    # This test is here to make sure that any change to this behavior is detected
    # and not silent. The TODOs below mark the places with unexpected behavior.
    a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
    a = a_orig.clone()
    b = torch.view_as_real(a)
    b = b.transpose(0, 1)
    b += 1
    b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
    non_inplace_grad = a_orig.grad

    a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
    a = a_orig.clone()
    b = torch.view_as_real(a)
    b.transpose_(0, 1)
    b += 1
    b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
    inplace_grad = a_orig.grad

    # TODO: this is a bug!
    # once this is fixed, it should have the transpose removed:
    # self.assertEqual(non_inplace_grad, inplace_grad)
    self.assertEqual(non_inplace_grad.T, inplace_grad)
def test_autograd_multiple_views_python(self):
    """Pin behavior when a custom Function creates several views in forward.

    Only the last view is returned; backward must still run once, and an
    inplace write on the returned view must raise.
    """
    # This is not necessarily the absolute correct behavior, but this is the current
    # one. This test is here to make sure that any change to this behavior is detected
    # and not silent. The TODOs below mark the places with unexpected behavior.
    # Note that any change in these test will be BC-breaking and should be done carefully.

    # This checks that multiples views in the forward are properly traced and how they
    # behave with respect to inplace operations.

    # This indicator is used to track how many times the backward function was called
    bw_called = [0]

    class ComplexView(Function):
        @staticmethod
        def forward(ctx, a, idx):
            # Two views of `a` are created here on purpose; the first is
            # immediately discarded and only the select() view is returned.
            res = a.narrow(0, idx, 1)
            res = a.select(0, idx)
            ctx.save_for_backward(a)
            ctx.idx = idx
            return res

        @staticmethod
        def backward(ctx, grad):
            bw_called[0] += 1
            (a,) = ctx.saved_tensors
            res = torch.zeros_like(a)
            res.select(0, ctx.idx).copy_(grad)
            return res, None

    a = torch.ones(2, requires_grad=True)
    idx = 1

    bw_called[0] = 0
    out = ComplexView.apply(a.clone(), idx)
    out.sum().backward()
    self.assertTrue(bw_called[0] == 1)

    out = ComplexView.apply(a.clone(), idx)
    with self.assertRaisesRegex(
        RuntimeError,
        "Output 0 of ComplexViewBackward is a view and is being modified inplace",
    ):
        out += 1
def test_autograd_python_custom_function_inplace(self):
    """Pin mark_dirty behavior for custom Functions doing inplace ops.

    Covers: single dirty output (plain, with extra inplace, with view
    input), mark_dirty on a non-input (warns), dirty output among multiple
    outputs (raises for view input), and a dirtied input that is not
    returned (raises).
    """
    # This is not necessarily the absolute correct behavior, but this is the current
    # one. This test is here to make sure that any change to this behavior is detected
    # and not silent. The TODOs below mark the places with unexpected behavior.
    # Note that any change in these test will be BC-breaking and should be done carefully.

    # This test checks custom autograd.Function that perform inplace operations

    bw_called = [0]

    # I) Single output
    class MyAdder(Function):
        @staticmethod
        def forward(ctx, a, b):
            a.add_(b)
            ctx.mark_dirty(a)
            return a

        @staticmethod
        def backward(ctx, grad):
            bw_called[0] += 1
            return grad, grad

    a = torch.ones(2, requires_grad=True)
    b = torch.ones(2, requires_grad=True)

    # No extra inplace
    c = MyAdder.apply(a.clone(), b)
    c.sum().backward()
    self.assertTrue(bw_called[0] == 1)

    # With extra inplace on the output
    bw_called[0] = 0
    c = MyAdder.apply(a.clone(), b)
    c += 2
    c.sum().backward()
    self.assertTrue(bw_called[0] == 1)

    # The input is a view
    bw_called[0] = 0
    c = MyAdder.apply(a.clone().view_as(a), b)
    c.sum().backward()
    self.assertTrue(bw_called[0] == 1)

    # Should not give non-inputs to mark_dirty
    class MyAdderBad(Function):
        @staticmethod
        def forward(ctx, a, b):
            # `c` is an intermediate, not an input: marking it dirty warns.
            c = 3 * a
            c.add_(b)
            ctx.mark_dirty(c)
            return c

        @staticmethod
        def backward(ctx, grad):
            bw_called[0] += 1
            grad = 3 * grad
            return grad, grad

    a = torch.ones(2, requires_grad=True)
    b = torch.ones(2, requires_grad=True)

    with warnings.catch_warnings(record=True) as w:
        MyAdderBad.apply(a.clone(), b)
    self.assertEqual(len(w), 1)

    # II) Multiple outputs
    class MyBadAdder(Function):
        @staticmethod
        def forward(ctx, a, b):
            a.add_(b)
            ctx.mark_dirty(a)
            return a, a + b

        @staticmethod
        def backward(ctx, ga, gab):
            bw_called[0] += 1
            return ga + gab, ga + gab

    # No extra inplace
    bw_called[0] = 0
    c, d = MyBadAdder.apply(a.clone(), b)
    (c * d).sum().backward()
    self.assertTrue(bw_called[0] == 1)

    # With extra inplace on the output
    bw_called[0] = 0
    c, d = MyBadAdder.apply(a.clone(), b)
    c += 2
    (c * d).sum().backward()
    self.assertTrue(bw_called[0] == 1)

    # The input is a view
    inplace_on_view_err = (
        "your Function modifies inplace an input that is a view of another Tensor"
    )
    with self.assertRaisesRegex(RuntimeError, inplace_on_view_err):
        c, d = MyBadAdder.apply(a.clone().view_as(a), b)

    # III) Inplace + other op
    class MyOutPlaceAdder(Function):
        @staticmethod
        def forward(ctx, a, b):
            # `a` is dirtied but a clone — not `a` itself — is returned.
            a.add_(b)
            ctx.mark_dirty(a)
            return a.clone(), a + b

        @staticmethod
        def backward(ctx, ga, gab):
            bw_called[0] += 1
            return ga + gab, ga + 2 * gab

    # We don't reuse the input
    def fn(a, b):
        orig_a = a.clone().view_as(a)
        c, d = MyOutPlaceAdder.apply(orig_a, b)
        return (c * d).sum()

    bad_mark_dirty_err = "Some elements marked as dirty during the forward method were not returned as output."
    with self.assertRaisesRegex(RuntimeError, bad_mark_dirty_err):
        fn(a, b)
def test_custom_function_mark_dirty_not_differentiable(self):
    """mark_dirty must return the input object as-is and force jvp to be inplace.

    Checks both the identity guarantee of a dirty output and the forward-AD
    requirement that jvp mutates the tangent inplace (error raised when it
    returns a fresh tensor instead).
    """

    def get_custom_fn(jvp_err):
        class InplaceMul(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x):
                result = x.mul_(2)
                ctx.mark_dirty(result)
                return result

            @staticmethod
            def backward(ctx, grad_output):
                pass

            @staticmethod
            def jvp(ctx, x_t):
                if jvp_err:
                    # Deliberately wrong: tangent not modified inplace.
                    return x_t
                else:
                    return x_t.mul_(2)

        return InplaceMul

    for requires_grad, jvp_err in product([True, False], repeat=2):
        InplaceMul = get_custom_fn(jvp_err)

        # Make sure that tensor is always returned as-is if marked dirty
        z = torch.tensor(1.0, requires_grad=requires_grad)
        x = z.clone()
        y = InplaceMul.apply(x)
        self.assertTrue(x is y)
        self.assertEqual(x, z * 2)

        # jvp must properly modify the input grad if mark_dirty is set
        with fwAD.dual_level():
            x_tangent = torch.ones_like(x)
            x_dual = fwAD.make_dual(x, x_tangent)

            if jvp_err:
                bad_mark_dirty_err = (
                    "jvp function must modify the corresponding gradient inplace"
                )
                with self.assertRaisesRegex(RuntimeError, bad_mark_dirty_err):
                    InplaceMul.apply(x_dual)
            else:
                out_dual = InplaceMul.apply(x_dual)
                _, out_tangent = fwAD.unpack_dual(out_dual)
                self.assertTrue(out_dual is x_dual)
                self.assertTrue(out_tangent is x_tangent)
def test_custom_function_mark_output_view_of_intermediate(self):
    """mark_dirty on a view of an intermediate (not an input) must be rejected."""

    class MarkDirtyNonInput(torch.autograd.Function):
        @staticmethod
        def forward(ctx, inp):
            # The returned view aliases a fresh clone, not the input itself.
            result = inp.clone().view_as(inp)
            ctx.mark_dirty(result)
            return result

        @staticmethod
        def backward(ctx, grad_output):
            pass

    leaf = torch.tensor([1.0], requires_grad=True)
    non_leaf = leaf.clone()
    expected_err = "received a tensor that was not an input."
    with self.assertRaisesRegex(RuntimeError, expected_err):
        MarkDirtyNonInput.apply(non_leaf)
def test_custom_function_inplace_on_non_default_view(self):
    """Dirtying one output of a multi-view op inside a custom Function must raise."""

    class DirtyInput(torch.autograd.Function):
        @staticmethod
        def forward(ctx, inp):
            inp.add_(1)
            ctx.mark_dirty(inp)
            return inp

        @staticmethod
        def backward(ctx, grad_output):
            pass

    base = torch.tensor([1.0, 2.0], requires_grad=True)
    base_clone = base.clone()
    # split_with_sizes returns multiple views of the same base.
    first_view, second_view = base.split_with_sizes([1, 1], dim=0)
    expected_err = "output of a function that returns multiple view"
    with self.assertRaisesRegex(RuntimeError, expected_err):
        DirtyInput.apply(first_view)
def test_custom_function_inplace_on_view_of_leaf(self):
    """Dirtying a view of a grad-requiring leaf inside a custom Function must raise."""

    class DirtyInput(torch.autograd.Function):
        @staticmethod
        def forward(ctx, inp):
            inp.add_(1)
            ctx.mark_dirty(inp)
            return inp

        @staticmethod
        def backward(ctx, grad_output):
            pass

    leaf = torch.tensor([1.0, 2.0], requires_grad=True)
    leaf_view = leaf.view_as(leaf)
    expected_err = "a view of a leaf Variable that requires grad"
    with self.assertRaisesRegex(RuntimeError, expected_err):
        DirtyInput.apply(leaf_view)
def test_named_tensor_for_complex_views(self):
    """Backward through view_as_complex on a (renamed) named tensor yields the expected grad."""
    names = ["batch", "height", "width", "complex"]
    z = torch.ones((2, 1, 2, 2), requires_grad=True)
    z_named = z.refine_names(*names)
    # Names must be dropped before view_as_complex, then re-applied
    # (the trailing "complex" dim is folded into the complex dtype).
    z_complex = torch.view_as_complex(z_named.rename(None)).refine_names(
        *names[:-1]
    )
    z_complex.sum().abs().backward()
    expected = torch.ones_like(z_complex).rename(None)
    abs_1_1j = abs(1 + 1j)
    expected.fill_(complex(abs_1_1j / 2, abs_1_1j / 2))
    self.assertEqual(z.grad, torch.view_as_real(expected))
def test_custom_function_saving_mutated_view_no_leak(self):
    """Saving a dirtied view for backward must not create a reference cycle.

    After the local scope exits, the weakref to the saved/dirtied view must
    be dead, proving the tensor was collected.
    """

    class Test(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            ctx.mark_dirty(x)
            ctx.save_for_backward(x)
            return x

        @staticmethod
        def backward(ctx, grad):
            pass

    def scope():
        x = torch.tensor(1.0, requires_grad=True).clone()
        x = x.view_as(x)
        y = Test.apply(x)
        return weakref.ref(x)

    ref = scope()
    # If anything kept x alive (a cycle through the saved variable), this fails.
    self.assertIsNone(ref())
def test_custom_function_return_view_in_nograd(self):
    """A custom Function returning a view under no_grad behaves like the plain view op."""

    class Alias(Function):
        @staticmethod
        def forward(ctx, x):
            return x[:]

        @staticmethod
        def backward(ctx, gx):
            return gx

    inp = torch.rand(2, requires_grad=True)

    with torch.no_grad():
        output = Alias.apply(inp)

    with torch.no_grad():
        expected_output = inp[:]

    # Calling the custom function should operate as if we called an equivalent op
    self.assertEqual(output.requires_grad, expected_output.requires_grad)

    # Check that in-place modification on view throws
    leaf_grad_err = (
        "A view was created in no_grad mode and is being modified inplace"
    )
    with self.assertRaisesRegex(RuntimeError, leaf_grad_err):
        output.zero_()
def test_custom_function_preserve_torch_function_when_return_as_is(self):
    """Returning the input as-is from a Function keeps the __torch_function__ subclass."""

    class Custom(torch.Tensor):
        def __init__(self, data):
            super().__init__()
            self._data = data

        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            # Unwrap Custom args, run the op on plain tensors, re-wrap the result.
            kwargs = {} if kwargs is None else kwargs
            args = tuple(a._data if isinstance(a, cls) else a for a in args)
            out = func(*args, **kwargs)
            if isinstance(out, torch.Tensor):
                out = cls(out)
            return out

    class Fn(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        def backward(ctx):
            pass

    x = Custom(torch.randn(2, 3))
    y = Fn.apply(x)
    self.assertTrue(isinstance(y, Custom))
def test_grad_mode_restored_reentrant(self):
    """Reentrant backward using enable_grad must restore the caller's grad mode."""

    class MyFunction(Function):
        @staticmethod
        def forward(ctx, inp):
            return inp.clone()

        @staticmethod
        def backward(ctx, go):
            original = torch._C.is_grad_enabled()
            with torch.enable_grad():
                self.assertTrue(torch._C.is_grad_enabled())
                foo = torch.rand(go.size(), requires_grad=True)
                # Reentrant autograd call inside backward.
                (grad,) = torch.autograd.grad(foo**3, foo, grad_outputs=go)
                self.assertTrue(torch._C.is_grad_enabled())
            # The grad mode from before enable_grad must be back in effect.
            self.assertTrue(torch._C.is_grad_enabled() == original)
            return grad

    inp = torch.rand(3, requires_grad=True)

    # Case where original==False
    MyFunction.apply(inp).sum().backward()

    # Case where original==True
    MyFunction.apply(inp).sum().backward(create_graph=True)
def test_power_function(self):
    """Gradient of base**exponent w.r.t. the exponent at base == 0.

    The grad is -inf for a negative exponent and 0 otherwise; checked for
    both a tensor base and a plain Python scalar base.
    """
    expected_grad = torch.tensor([-inf, 0.0, 0.0])
    for base in (torch.tensor([0.0, 0.0, 0.0]), 0):
        exponent = torch.tensor([-1.0, 0.0, 1.0], requires_grad=True)
        torch.sum(base**exponent).backward()
        self.assertEqual(exponent.grad, expected_grad)
def test_custom_function_error(self):
    """Missing or conflicting forward/backward/vjp/jvp definitions raise clear errors."""

    # No forward defined.
    class BadFw(Function):
        @staticmethod
        def backward(ctx, foo):
            return foo

    # No backward/vjp defined.
    class BadBw(Function):
        @staticmethod
        def forward(ctx, foo):
            return foo.clone()

    # Both backward and vjp defined (ambiguous).
    class BadBw2(Function):
        @staticmethod
        def forward(ctx, foo):
            return foo.clone()

        @staticmethod
        def backward(ctx, foo):
            return foo

        @staticmethod
        def vjp(ctx, foo):
            return foo

    # No jvp defined, but called under forward-mode AD.
    class BadJvp(Function):
        @staticmethod
        def forward(ctx, foo):
            return foo.clone()

    inp = torch.rand(1, requires_grad=True)
    with self.assertRaisesRegex(NotImplementedError, "must implement the forward"):
        BadFw.apply(inp)

    with self.assertRaisesRegex(RuntimeError, "must implement either the backward"):
        BadBw.apply(inp).sum().backward()

    with self.assertRaisesRegex(
        RuntimeError, "Implementing both 'backward' and 'vjp'"
    ):
        BadBw2.apply(inp).sum().backward()

    with self.assertRaisesRegex(RuntimeError, "must implement the jvp function"):
        with fwAD.dual_level():
            d = fwAD.make_dual(inp, torch.rand_like(inp))
            res = BadJvp.apply(d)
def test_custom_function_forward_mode_view_checks(self):
    """jvp of a view-returning Function must return a matching view of the tangent.

    Each flag breaks the view relationship differently; the expected error
    (or None for the valid case) is asserted via gradcheck with forward AD.
    """
    flag_to_error = {
        "ok": None,
        "not_a_view": "jvp is not returning a view",
        "not_a_view_of_inp": "jvp is not returning a view of the given",
        "not_a_view_of_inp_base": "jvp is not returning a view of the same base",
    }

    class ViewFn(Function):
        @staticmethod
        def forward(ctx, foo, flag):
            ctx.flag = flag
            ctx.size = foo.size()
            return foo.narrow(0, 0, 2)

        @staticmethod
        def vjp(ctx, gO):
            gI = gO.new_zeros(ctx.size)
            gI.narrow(0, 0, 2).copy_(gO)
            return gI, None

        @staticmethod
        def jvp(ctx, gI, _):
            res = gI.narrow(0, 0, 2)
            if ctx.flag != "ok":
                # Break the view in the gradients!
                res = res.clone()
            if ctx.flag in ["not_a_view_of_inp", "not_a_view_of_inp_base"]:
                # Result should be a view, just of the wrong thing
                res = res.view_as(res)
            return res

    inp = torch.rand(4, 4, dtype=torch.double, requires_grad=True)

    for flag, msg in flag_to_error.items():

        def test_fn(inp):
            if flag == "not_a_view_of_inp_base":
                inp = inp.view_as(inp)
            return ViewFn.apply(inp, flag)

        if msg is None:
            gradcheck(test_fn, inp, check_forward_ad=True)
        else:
            with self.assertRaisesRegex(RuntimeError, msg):
                gradcheck(test_fn, inp, check_forward_ad=True)
def test_custom_function_forward_mode_inplace_checks(self):
    """jvp of a mark_dirty Function must update the tangent inplace, or error."""

    class InplaceFn(Function):
        @staticmethod
        def forward(ctx, foo, flag):
            ctx.mark_dirty(foo)
            ctx.flag = flag
            foo.mul_(2)
            return foo

        @staticmethod
        def vjp(ctx, gO):
            return 2 * gO, None

        @staticmethod
        def jvp(ctx, gI, _):
            if ctx.flag:
                # Don't do the change inplace
                return 2 * gI
            else:
                gI.mul_(2)
                return gI

    inp = torch.rand(4, 4, dtype=torch.double, requires_grad=True)

    def test_fn(inp, flag):
        # clone so the dirtied tensor is never gradcheck's own input
        inp = inp.clone()
        return InplaceFn.apply(inp, flag)

    gradcheck(test_fn, (inp, False), check_forward_ad=True)

    with self.assertRaisesRegex(
        RuntimeError,
        "inplace custom Function is not modifying the forward mode gradients inplace",
    ):
        gradcheck(test_fn, (inp, True), check_forward_ad=True)
def test_custom_function_forward_mode_wrong_formula(self):
    """gradcheck must detect a jvp whose formula disagrees with vjp."""

    class UserFn(Function):
        @staticmethod
        def forward(ctx, foo, should_fail):
            ctx.should_fail = should_fail
            return foo * 2

        @staticmethod
        def vjp(ctx, gO):
            return 2 * gO, None

        @staticmethod
        def jvp(ctx, gI, _):
            if ctx.should_fail:
                # Wrong gradient formula
                return 3 * gI
            else:
                return 2 * gI

    inp = torch.rand(10, dtype=torch.double, requires_grad=True)
    gradcheck(UserFn.apply, (inp, False), check_forward_ad=True)

    with self.assertRaisesRegex(
        RuntimeError, "Jacobian computed with forward mode mismatch for output 0"
    ):
        gradcheck(UserFn.apply, (inp, True), check_forward_ad=True)
def test_custom_function_forward_mode_non_tensor_before_tensor_args(self):
    """jvp receives None for non-tensor args, preserving their argument positions."""

    class MyFn(torch.autograd.Function):
        @staticmethod
        def forward(ctx, nt, x, nt2, y):
            return x * 2 + y * 3

        @staticmethod
        def jvp(ctx, nt, x_t, nt2, y_t):
            # Non-tensor args have no tangent: they arrive as None.
            self.assertIsNone(nt)
            self.assertIsNone(nt2)
            return x_t * 2 + y_t * 3

    x = torch.tensor(1.0, dtype=torch.double)
    t = torch.tensor(1.0, dtype=torch.double)
    y = torch.tensor(1.0, dtype=torch.double)

    with fwAD.dual_level():
        dual_x = fwAD.make_dual(x, t)
        MyFn.apply(1, dual_x, 1, y)

    gradcheck(
        MyFn.apply,
        (1, x.requires_grad_(True), 1, y.requires_grad_(True)),
        check_forward_ad=True,
        check_backward_ad=False,
        check_batched_grad=False,
    )
def test_custom_function_forward_mode_forward_is_no_op(self):
    """When forward returns an input as-is, jvp must return the tangent as a view of itself."""
    error_regex = (
        "A custom Function's forward is returning a view \\(or an input as-is\\)"
    )

    return_lambdas = {
        # If we return an input as-is in forward, that is treated
        # as if self.view_as(self) is performed. If jvp returns x.view_as(x),
        # this is OK.
        "view_as": lambda x: x.view_as(x),
        # Expect this to raise an error
        "self": lambda x: x,
        # Expect this to raise the same error
        "mul_by_2": lambda x: x * 2,
    }

    for k, fn in return_lambdas.items():

        class MyFn(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x, y):
                # Second output is the first input returned as-is.
                return x + y, x

            @staticmethod
            def vjp(ctx, gO1, gO2):
                return gO1 + gO2, gO1

            @staticmethod
            def jvp(ctx, x_t, y_t):
                return x_t + y_t, fn(x_t)

        a = torch.tensor(1.0, dtype=torch.double, requires_grad=True)
        t = torch.tensor(1.0, dtype=torch.double)
        b = torch.tensor(1.0, dtype=torch.double, requires_grad=True)

        c = torch.tensor(1.0, dtype=torch.double)
        t2 = torch.tensor(1.0, dtype=torch.double)
        d = torch.tensor(1.0, dtype=torch.double)

        with fwAD.dual_level():
            a_dual = fwAD.make_dual(a, t)
            c_dual = fwAD.make_dual(c, t2)

            if k == "view_as":
                _, out2 = MyFn.apply(a_dual, b)
                # The output tangent must be a view whose base is the input tangent.
                self.assertTrue(fwAD.unpack_dual(out2).tangent._base is t)

                _, out2 = MyFn.apply(c_dual, d)
                self.assertTrue(fwAD.unpack_dual(out2).tangent._base is t2)
            else:
                with self.assertRaisesRegex(RuntimeError, error_regex):
                    MyFn.apply(a_dual, b)

                with self.assertRaisesRegex(RuntimeError, error_regex):
                    MyFn.apply(c_dual, d)

        if k == "view_as":
            gradcheck(MyFn.apply, (a, c), check_forward_ad=True)
        else:
            with self.assertRaisesRegex(RuntimeError, error_regex):
                gradcheck(MyFn.apply, (a, c), check_forward_ad=True)
def test_custom_function_save_for_forward(self):
    """save_for_forward exposes tensors to jvp; save_for_backward alone does not."""

    class Func(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int):
            ctx.save_for_backward(x, y)
            ctx.save_for_forward(x, y)
            ctx.z = z
            ctx.prod = x * y
            return z * ctx.prod

        @staticmethod
        def jvp(ctx, x_t, y_t, _):
            # Product rule: d(z*x*y) = z*(y*dx + x*dy).
            x_p, y_p = ctx.saved_tensors
            z = ctx.z
            return z * (y_p * x_t + x_p * y_t)

        @staticmethod
        def vjp(ctx, grad_out):
            x, y = ctx.saved_tensors
            z = ctx.z
            return z * grad_out * y, z * grad_out * x, None

    a = torch.tensor(1.0, requires_grad=True, dtype=torch.double)
    t = torch.tensor(1.0, dtype=torch.double)
    b = torch.tensor(2.0, requires_grad=True, dtype=torch.double)
    c = 4

    with fwAD.dual_level():
        a_dual = fwAD.make_dual(a, t)
        out = Func.apply(a_dual, b, c)
        out.backward()

    gradcheck(Func.apply, (a, b, c), check_forward_ad=True)

    # When saved for backward, but not saved for forward
    class Func(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x: torch.Tensor):
            ctx.save_for_backward(x)
            return x.clone()

        @staticmethod
        def jvp(ctx, x_t):
            # Nothing was saved via save_for_forward, so jvp sees no tensors.
            self.assertEqual(len(ctx.saved_tensors), 0)
            return x_t

        @staticmethod
        def vjp(ctx, grad_out):
            (x,) = ctx.saved_tensors
            self.assertEqual(len(ctx.saved_tensors), 1)
            return grad_out

    with fwAD.dual_level():
        a_dual = fwAD.make_dual(a, t)
        out = Func.apply(a_dual)
        out.backward()

    gradcheck(Func.apply, (a,), check_forward_ad=True)
@skipIfTorchDynamo("compile tested in test/dynamo/test_autograd_function.py")
def test_custom_function_forward_mode_non_differentiable(self):
    """``jvp`` must return None for outputs that are non-differentiable,
    whether marked via ``mark_non_differentiable``, of integral dtype, or
    not a tensor at all; returning a tangent there is an error."""
    # returns differentiable type, marked non-differentiable
    class Func(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, y):
            out = y.clone()
            ctx.mark_non_differentiable(out)
            return x.clone(), out

        @staticmethod
        def jvp(ctx, x_tangent, y_tangent):
            return x_tangent, None

    x = torch.tensor(2.0)
    x_tangent = torch.tensor(1.0)
    y = torch.tensor(3.0)

    with fwAD.dual_level():
        x_dual = fwAD.make_dual(x, x_tangent)
        _, out2_dual = Func.apply(x_dual, y)
        self.assertEqual(fwAD.unpack_dual(out2_dual).tangent, None)

    y = torch.tensor(3)

    # returns non-differentiable type, NOT marked non-differentiable
    class Func(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, y):
            return x.clone(), y.clone()

        @staticmethod
        def jvp(ctx, x_tangent, y_tangent):
            # Integral-dtype input carries no tangent.
            self.assertIsNone(y_tangent)
            return x_tangent, None

    with fwAD.dual_level():
        x_dual = fwAD.make_dual(x, x_tangent)
        _, out2_dual = Func.apply(x_dual, y)
        self.assertEqual(fwAD.unpack_dual(out2_dual).tangent, None)

    class FuncWrong(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, y):
            out = y.clone()
            ctx.mark_non_differentiable(out)
            return x.clone(), out

        @staticmethod
        def jvp(ctx, x_tangent, y_tangent):
            # Wrong: returns a tangent for the non-differentiable output.
            return x_tangent, x_tangent.clone()

    with fwAD.dual_level():
        x_dual = fwAD.make_dual(x, x_tangent)
        with self.assertRaisesRegex(
            RuntimeError, "You should return None at that position instead"
        ):
            FuncWrong.apply(x_dual, y)

    # returns non-tensor
    class Func(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x.clone(), object(), x.clone()

        @staticmethod
        def jvp(ctx, x_tangent):
            return x_tangent, None, x_tangent

    with fwAD.dual_level():
        x_dual = fwAD.make_dual(x, x_tangent)
        out_dual, _, out2_dual = Func.apply(x_dual)
        self.assertEqual(fwAD.unpack_dual(out_dual).tangent, x_tangent)
        self.assertEqual(fwAD.unpack_dual(out2_dual).tangent, x_tangent)
def test_custom_function_local_inplace(self):
    """In-place mutation of a locally created view inside a custom
    Function's forward must not change which grad_fn the output gets."""

    class MyFn(torch.autograd.Function):
        @staticmethod
        def forward(ctx, inp, inplace):
            view = inp.clone()[:3]
            if inplace:
                view += 2
            return view

        @staticmethod
        def backward(ctx, grad):
            return grad, None

    base = torch.rand(10, requires_grad=True)

    foo = MyFn.apply(base, False)
    self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")

    foo = MyFn.apply(base, True)
    self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
def test_integer_outputs(self):
    """Integer-valued outputs (argmax/argmin/argsort, searchsorted,
    bucketize, count_nonzero, and the inverse/counts outputs of the
    unique family) must never require grad."""
    inp = torch.rand(4, requires_grad=True)

    out = inp.argmax()
    self.assertFalse(out.dtype.is_floating_point)
    self.assertFalse(out.requires_grad)

    out = inp.argmin()
    self.assertFalse(out.dtype.is_floating_point)
    self.assertFalse(out.requires_grad)

    out = inp.argsort()
    self.assertFalse(out.dtype.is_floating_point)
    self.assertFalse(out.requires_grad)

    val = torch.rand((), requires_grad=True)

    out = torch.searchsorted(inp, val)
    self.assertFalse(out.dtype.is_floating_point)
    self.assertFalse(out.requires_grad)

    bins = torch.linspace(0, 1.0, steps=100, requires_grad=True)
    vals = torch.rand(5, 5, requires_grad=True)
    out = torch.bucketize(vals, bins)
    self.assertFalse(out.dtype.is_floating_point)
    self.assertFalse(out.requires_grad)

    val = torch.empty(5).requires_grad_()
    out = val.count_nonzero()
    self.assertFalse(out.requires_grad)

    def assert_only_first_requires_grad(res):
        # Only the values output may require grad; inverse/counts may not.
        if not isinstance(res, tuple):
            res = (res,)
        self.assertTrue(res[0].requires_grad)
        for out in res[1:]:
            if out is not None:
                self.assertFalse(out.requires_grad)

    for sort in [True, False]:
        for return_inverse in [True, False]:
            for return_counts in [True, False]:
                res = torch.unique(
                    inp,
                    sorted=sort,
                    return_inverse=return_inverse,
                    return_counts=return_counts,
                )
                assert_only_first_requires_grad(res)

                res = torch.unique(
                    inp,
                    sorted=sort,
                    return_inverse=return_inverse,
                    return_counts=return_counts,
                    dim=0,
                )
                assert_only_first_requires_grad(res)

                res = torch.unique_consecutive(
                    inp, return_inverse=return_inverse, return_counts=return_counts
                )
                assert_only_first_requires_grad(res)

                res = torch.unique_consecutive(
                    inp,
                    return_inverse=return_inverse,
                    return_counts=return_counts,
                    dim=0,
                )
                assert_only_first_requires_grad(res)

                # Here we test the internal functions to make sure all of them are
                # covered on top of the public API
                res = torch._unique(inp, sorted=sort, return_inverse=return_inverse)
                assert_only_first_requires_grad(res)

                # This looks public but is actually manually deleted from the
                # torch namespace in torch/functional.py
                res = torch._VF.unique_dim(
                    inp,
                    dim=0,
                    sorted=sort,
                    return_inverse=return_inverse,
                    return_counts=return_counts,
                )
                assert_only_first_requires_grad(res)

                # We don't test `unique_dim_consecutive` here.
                # It looks public but the python binding is actually manually disabled in
                # tools/autograd/gen_python_functions.py
                res = torch._unique2(
                    inp,
                    sorted=sort,
                    return_inverse=return_inverse,
                    return_counts=return_counts,
                )
                assert_only_first_requires_grad(res)
def test_custom_function_cycle(self):
    """A reference cycle through a custom Function's ctx
    (output -> graph -> ctx.meta -> output) must be collectable by gc,
    both with and without running backward first."""

    class MyFn(Function):
        @staticmethod
        def forward(ctx, x, metadata):
            x = x.clone()
            ctx.meta = metadata
            ctx.save_for_backward(x)
            return x

        @staticmethod
        def backward(ctx, gO):
            (x,) = ctx.saved_tensors
            self.assertEqual(x, 3.14)
            self.assertEqual(ctx.meta["foo"], 3.14)
            return gO * x, None

    def get_refs(with_backward):
        a = torch.tensor(3.14, requires_grad=True)
        metadata = {}
        out = MyFn.apply(a, metadata)
        # Close the cycle: metadata (held by ctx) now references the output.
        metadata["foo"] = out

        if with_backward:
            out.sum().backward()
            self.assertEqual(a.grad, a)

        return torch._C._WeakTensorRef(out)

    with disable_gc():
        ref = get_refs(False)
        self.assertFalse(ref.expired())
    gc.collect()
    self.assertTrue(ref.expired())

    # The backward clears the saved_variables but not the __dict__
    with disable_gc():
        ref = get_refs(True)
        self.assertFalse(ref.expired())
    gc.collect()
    self.assertTrue(ref.expired())
def test_create_graph_and_full_backward_hook_cycle(self):
    """Full backward hooks plus ``create_graph=True`` must not leak the
    graph: objects stashed in grad_fn metadata must be gc-collectable."""
    # If BackwardHook saves grad_output, it can create a cycle when we perform backward
    # with create_graph=True
    #
    # grad_output -> grad_output.grad_fn -> graph -> hook -> grad_output
    #
    class TestCls:
        # Dummy class for the purpose of creating a weakref
        pass

    def get_ref(input_requires_grad, nb_hooks):
        t = torch.randn(10, requires_grad=input_requires_grad)
        a = torch.tensor(1.0, requires_grad=True)

        class Test(nn.Module):
            def forward(self, x):
                return x**2 * a**2

        mod = Test()

        for _ in range(nb_hooks):
            mod.register_full_backward_hook(lambda a, b, c: None)

        tmp = mod(t)

        # Save dummy object to graph and get a weak ref to it
        test = TestCls()
        ref = weakref.ref(test)
        tmp.grad_fn.metadata["a"] = test

        with set_warn_always_context(True):
            with warnings.catch_warnings(record=True) as w:
                tmp.exp().sum().backward(create_graph=True)
                self.assertTrue(w)
                found = 0
                for warning in w:
                    if "Using backward() with create_graph=True" in str(
                        warning.message
                    ):
                        found += 1
                self.assertEqual(found, 1)

        # Remove the backward + create_graph=True cycle
        a.grad = None
        t.grad = None

        return ref

    for nb_hooks in (1, 2, 3):
        for input_requires_grad in (True, False):
            ref_ = get_ref(
                input_requires_grad=input_requires_grad,
                nb_hooks=nb_hooks,
            )
            gc.collect()
            self.assertIsNone(ref_())
@parametrize("use_custom_function", [True, False])
@parametrize("use_tensor_hook", [True, False])
def test_hook_closure_cycle(self, use_custom_function, use_tensor_hook):
    """A hook whose closure captures the python grad_fn must keep the
    cycle alive (hook still fires after gc) until the rest of the graph
    releases the underlying cpp node."""
    # This creates a cycle between the hook and grad_fn_b
    # hook -> closure -> grad_fn_b (python) -> grad_fn (cpp) -> hook (cpp)
    # -> dict -> hook
    #
    # This test is testing that the grad_fn_b (python) only traverses the
    # dict if it is the only one holding a reference to the grad_fn_b (cpp)
    # shared_ptr
    #
    # See: https://github.com/pytorch/pytorch/issues/102174
    class Function(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        def backward(ctx, grad):
            return grad

    class Test:
        pass

    count = [0]

    def scope():
        a = torch.tensor(1.0, requires_grad=True)
        if use_custom_function:
            b = Function.apply(a)
        else:
            b = a.clone()
        grad_fn_b = b.grad_fn
        obj = Test()

        def hook(*args):
            # Make sure this hook's closure holds onto grad_fn_b
            # This forms a cycle between the hook and grad_fn_b
            # We also hold onto a sentinel object 'obj' to track
            # whether this cycle is still alive. See 'ref' below.
            grad_fn_b
            obj
            count[0] += 1

        if use_tensor_hook:
            b.register_hook(hook)
        else:
            b.grad_fn.register_hook(hook)
        c = b.clone()
        ref = weakref.ref(obj)
        return c, ref

    with disable_gc():
        out, ref = scope()
        out.backward(retain_graph=True)

        gc.collect()

        # Make sure gc does not clear the cycle noted above.
        # e.g. the hook is alive and gets fired even after gc runs
        out.backward(retain_graph=True)
        self.assertEqual(count[0], 2)

        # ref is still alive because the use_count of the cpp grad_fn
        # shared_ptr > 1 since (1) the python grad_fn is alive, and (2) the
        # rest of the graph holds onto the shared_ptr
        self.assertIsNotNone(ref())

        # Then delete the rest of the graph and check that ref is dead
        del out
        gc.collect()
        self.assertIsNone(ref())
def test_full_backward_hook_double_backward(self):
    """A full backward hook fires on the first backward pass, and the
    double-backward of the resulting grad neither errors nor re-fires it."""
    x = torch.rand(1, requires_grad=True)
    y = torch.rand_like(x)

    func = torch.nn.MSELoss()
    counter = [0]

    def hook(module, grad_input, grad_output):
        counter[0] += 1

    func.register_full_backward_hook(hook)

    f = func(x, y)

    (gradx_f,) = torch.autograd.grad(f, x, create_graph=True)
    self.assertEqual(counter[0], 1)
    _ = torch.autograd.grad(gradx_f, x)
    # We should not error, and counter should not be incremented
    self.assertEqual(counter[0], 1)
def test_input_buffer_accum(self):
    """Accumulating a dense and a sparse gradient into the same input
    buffer must not modify the caller-provided gradient tensors in place."""
    source = torch.rand(2, 2, requires_grad=True)
    # Sparse path: gather with sparse_grad=True yields a sparse gradient.
    index = torch.tensor([[0, 0]], dtype=torch.long)
    sparse_out = source.gather(0, index, sparse_grad=True)
    # Dense path: clone passes the incoming gradient through unchanged.
    dense_out = source.clone()
    dense_grad_snapshot = torch.rand_like(dense_out)
    dense_grad = dense_grad_snapshot.clone()
    sparse_grad = torch.rand_like(sparse_out)
    torch.autograd.backward((dense_out, sparse_out), (dense_grad, sparse_grad))
    # The gradient we handed in must be left untouched by the accumulation.
    self.assertEqual(dense_grad, dense_grad_snapshot)
def test_no_unnecessary_unwrapping(self):
    """Saved inputs keep their identity (no re-wrap); saved outputs are
    re-wrapped (equal but not identical); saved tensors are freed after
    backward while the leaf itself is untouched."""
    a = torch.randn(5, requires_grad=True)
    a_orig = a.detach().clone()
    b = a * a
    c = a * b
    d = torch.exp(a)

    # a is leaf
    self.assertIs(b.grad_fn._saved_self, a)
    self.assertIs(b.grad_fn._saved_other, a)
    self.assertIs(c.grad_fn._saved_self, a)

    # b is not an output
    self.assertIs(c.grad_fn._saved_other, b)

    # d is an output
    self.assertEqual(d.grad_fn._saved_result, d)
    self.assertIsNot(d.grad_fn._saved_result, d)

    c.sum().backward()

    with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
        c.grad_fn._saved_self

    # a is left untouched
    self.assertEqual(a, a_orig)
def test_saved_variable_version_counter(self):
    """A SavedVariable shares its version counter with the tensor it was
    created from, so an in-place update is visible through both."""
    inp = torch.rand(2, requires_grad=True)
    out = torch.exp(inp)
    unpacked = out.grad_fn._saved_result

    def check_in_sync():
        self.assertEqual(out, unpacked)
        self.assertEqual(out._version, unpacked._version)

    check_in_sync()
    with torch.no_grad():
        out += 1
    check_in_sync()
def test_saved_variable_packing_unpacking_saved_original_with_hooks(self):
    """Per-SavedVariable ``register_hooks`` on a *saved input*: checks
    unpack round-trips, gradient scaling through non-inverse hooks,
    double backward, and the error paths (non-tensor unpack result,
    wrong hook arity, in-place pack hook)."""
    # Tests that packing/unpacking a SavedVariable works correctly with user-defined hooks
    # The saved_original / did_not_save_original distinction corresponds to the `save_original`
    # attribute of `SavedVariable`.

    def test(get_input, is_leaf):
        a = get_input()
        grad_fn = a.grad_fn
        y = a * a
        # Inverse pair: unpacked value equals the original.
        y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x / 2)
        self.assertEqual(a, y.grad_fn._saved_self)
        if not is_leaf:
            self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
            y.sum().backward()
        else:
            y.sum().backward()
            self.assertEqual(2 * a, a.grad)

        a = get_input()
        grad_fn = a.grad_fn
        y = a * a
        # Non-inverse pair: unpacked value (and grad) are scaled.
        y.grad_fn._raw_saved_self.register_hooks(lambda x: 2 * x, lambda x: x)
        self.assertEqual(2 * a, y.grad_fn._saved_self)
        if not is_leaf:
            self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
            y.sum().backward()
        else:
            y.sum().backward()
            self.assertEqual(3 * a, a.grad)

        # double backward
        a = get_input()
        grad_fn = a.grad_fn
        y = a**3
        y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
        s = torch.sum(y)
        (g,) = torch.autograd.grad(s, (a,), create_graph=True)
        if not is_leaf:
            self.assertIs(grad_fn, y.grad_fn._saved_self.grad_fn)
            g.sum().backward()
        else:
            g.sum().backward()
            self.assertEqual(6 * a, a.grad)

        a = get_input()
        y = a * a
        # Unpack hook returning a non-tensor is a TypeError at unpack time.
        y.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: 1)
        with self.assertRaisesRegex(
            TypeError, "Output of saved tensor unpack_hook expected to be a Tensor"
        ):
            print(y.grad_fn._saved_self)

        a = get_input()
        y = a * a
        with self.assertRaisesRegex(
            TypeError, "missing 1 required positional argument"
        ):
            y.grad_fn._raw_saved_self.register_hooks(lambda x, b: x, lambda x: x)

        a = get_input()
        y = a * a
        with self.assertRaisesRegex(
            TypeError, "missing 1 required positional argument"
        ):
            y.grad_fn._raw_saved_self.register_hooks(
                lambda x, b: (x, b), lambda x: x
            )

        def inplace_double(x):
            x *= 2
            return x

        a = get_input()
        t = a * a

        with self.assertRaisesRegex(
            RuntimeError,
            "A saved tensor pack hook is modifying its input in place.",
        ):
            t.grad_fn._raw_saved_self.register_hooks(
                inplace_double, lambda x: x / 2
            )

    # leaf
    test(lambda: torch.randn(5, requires_grad=True), True)

    # not leaf, not output
    test(lambda: (1 + torch.randn(5, requires_grad=True)), False)
def test_saved_variable_saved_original_inplace_detach(self):
    """``detach_()`` on a tensor saved as *input* makes backward raise;
    detaching one saved as *output* is fine."""
    # Detaching a tensor that is saved input raises
    a = torch.tensor(1.0, requires_grad=True).clone()
    b = a.sin()
    a.detach_()
    with self.assertRaisesRegex(
        RuntimeError, "Trying to use a saved tensor that has been detached"
    ):
        b.backward()

    # Detaching a tensor that is saved as output is OK
    a = torch.tensor(1.0, requires_grad=True).clone()
    b = a.exp()
    a.detach_()
    b.backward()
def test_saved_variable_packing_unpacking_did_not_save_original_with_hooks(self):
    """Identity hooks on a SavedVariable that stores the *output* (exp
    saves its result) keep value, grad_fn identity, and gradients intact."""
    # Tests that packing/unpacking a SavedVariable works correctly with user-defined hooks
    # The saved_original / did_not_save_original distinction corresponds to the `save_original`
    # attribute of `SavedVariable`.
    a = torch.randn(5, requires_grad=True)
    y = torch.exp(a)
    y.grad_fn._raw_saved_result.register_hooks(lambda x: x, lambda x: x)
    self.assertEqual(y, y.grad_fn._saved_result)
    self.assertIs(y.grad_fn, y.grad_fn._saved_result.grad_fn)
    y.sum().backward()
    self.assertEqual(a.grad, y)
def test_saved_variable_packing_unpacking_saved_original_with_default_hooks(self):
    """Default (context-manager) saved-tensor hooks: pack runs once per
    save, scaling hooks scale what is unpacked and hence the gradients,
    and exiting the context restores the no-hook behavior."""
    # Tests that default hooks are properly registered, used and reset
    # The saved_original / did_not_save_original distinction corresponds to the `save_original`
    # attribute of `SavedVariable`.
    # See also:
    # - test_saved_variable_packing_unpacking_saved_original_with_hooks

    def pack(x):
        warnings.warn("pack")
        return x

    with torch.autograd.graph.saved_tensors_hooks(pack, lambda x: x):
        a = torch.ones(5, requires_grad=True)

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            y = a * a
            # should raise two warnings from a being saved twice
            self.assertEqual(len(w), 2)

    with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
        a = torch.randn(5, requires_grad=True)
        y = a * a
        self.assertEqual(a, y.grad_fn._saved_self)
        self.assertEqual(a, y.grad_fn._saved_other)
        y.sum().backward()
        self.assertEqual(2 * a, a.grad)

    with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x / 2):
        # Inverse pair: behaves as if no hooks were set.
        a = torch.randn(5, requires_grad=True)
        y = a * a
        self.assertEqual(a, y.grad_fn._saved_self)
        self.assertEqual(a, y.grad_fn._saved_other)
        y.sum().backward()
        self.assertEqual(2 * a, a.grad)

    with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
        # Non-inverse pair: unpacked values and gradients are doubled.
        a = torch.randn(5, requires_grad=True)
        y = a * a
        self.assertEqual(2 * a, y.grad_fn._saved_self)
        self.assertEqual(2 * a, y.grad_fn._saved_other)
        y.sum().backward()
        self.assertEqual(4 * a, a.grad)

    # Exited hooks correctly
    a = torch.randn(5, requires_grad=True)
    y = a * a
    self.assertEqual(a, y.grad_fn._saved_self)
    self.assertEqual(a, y.grad_fn._saved_other)
    y.sum().backward()
    self.assertEqual(2 * a, a.grad)
def test_saved_variable_packing_unpacking_did_not_save_original_with_default_hooks(
    self,
):
    """Identity default hooks leave a saved *output* (exp result) and its
    gradients unchanged."""
    # See also test_saved_variable_packing_unpacking_did_not_save_original_with_hooks
    with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
        a = torch.randn(5, requires_grad=True)
        y = torch.exp(a)
        self.assertEqual(y, y.grad_fn._saved_result)
        y.sum().backward()
        self.assertEqual(a.grad, y)
def test_setting_default_saved_variable_hooks_twice_should_not_fail(self):
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
pass
def test_setting_default_saved_variable_hooks_twice_should_use_inner(self):
    """When hooks are nested, tensors saved inside the inner context use
    the inner hooks (x25 here) while the outer context's own saves use the
    outer hooks (x9 here)."""
    with torch.autograd.graph.saved_tensors_hooks(lambda x: 3 * x, lambda x: 3 * x):
        b = torch.randn(5, requires_grad=True)
        with torch.autograd.graph.saved_tensors_hooks(
            lambda x: 5 * x, lambda x: 5 * x
        ):
            a = torch.randn(5, requires_grad=True)
            y = a * a
        z = b * b
    y.sum().backward()
    z.sum().backward()
    self.assertEqual(2 * 5 * 5 * a, a.grad)
    self.assertEqual(2 * 3 * 3 * b, b.grad)
def test_disabling_saved_tensor_hooks(self):
    """disable_saved_tensors_hooks and saved_tensors_hooks are mutually
    exclusive in either nesting order, and the enabled state is restored
    after each context exits."""
    with torch.autograd.graph.disable_saved_tensors_hooks("error message"):
        with self.assertRaisesRegex(RuntimeError, "error message"):
            with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
                pass

    self.assertTrue(torch._C._autograd._saved_tensors_hooks_is_enabled())

    with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
        with self.assertRaisesRegex(RuntimeError, "error message"):
            with torch.autograd.graph.disable_saved_tensors_hooks("error message"):
                pass

    self.assertTrue(torch._C._autograd._saved_tensors_hooks_is_enabled())
def test_disabling_saved_tensor_hooks_nested(self):
    """Nested disable contexts: the innermost message wins, and each exit
    restores the enclosing (still disabled / finally enabled) state."""
    with torch.autograd.graph.disable_saved_tensors_hooks("outer"):
        with torch.autograd.graph.disable_saved_tensors_hooks("inner"):
            with self.assertRaisesRegex(RuntimeError, "inner"):
                with torch.autograd.graph.saved_tensors_hooks(
                    lambda x: x, lambda x: x
                ):
                    pass

        self.assertFalse(torch._C._autograd._saved_tensors_hooks_is_enabled())

    self.assertTrue(torch._C._autograd._saved_tensors_hooks_is_enabled())
def test_saved_tensor_hooks_custom_error_propagation(self):
    """Exceptions raised inside pack/unpack hooks must propagate with
    their original (user-defined) type: pack errors at forward time,
    unpack errors at backward time."""

    class CustomError(Exception):
        pass

    class error_on_pack_hook(torch.autograd.graph.saved_tensors_hooks):
        def __init__(self) -> None:
            def pack_hook(x):
                raise CustomError("pack")

            super().__init__(pack_hook, lambda x: x)

    class error_on_unpack_hook(torch.autograd.graph.saved_tensors_hooks):
        def __init__(self) -> None:
            def unpack_hook(x):
                raise CustomError("unpack")

            super().__init__(lambda x: x, unpack_hook)

    a = torch.tensor(1.0, requires_grad=True)

    with error_on_pack_hook():
        with self.assertRaisesRegex(CustomError, "pack"):
            out = torch.sin(a)

    with error_on_unpack_hook():
        out = torch.sin(a)
        with self.assertRaisesRegex(CustomError, "unpack"):
            out.backward()
def test_saved_tensor_hooks_custom_function_intermediates(self):
    """Default saved-tensor hooks must work with a custom Function that
    saves a detached intermediate via ``save_for_backward``."""

    class Func(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            intermediate = x.exp()
            ctx.save_for_backward(
                intermediate.clone().detach_().requires_grad_(True)
            )
            return x.exp()

        @staticmethod
        def backward(ctx, grad_out):
            (intermediate,) = ctx.saved_tensors
            return grad_out * intermediate

    a = torch.tensor(1.0, requires_grad=True)

    with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
        out = Func.apply(a)
    out.backward()
def test_unpack_hooks_exec_count(self):
    """Pack runs exactly once at forward time; unpack runs exactly once,
    and only when backward actually executes."""

    def f(x, y):
        return x * y

    pack_count = 0
    unpack_count = 0

    def pack_hook(x):
        nonlocal pack_count
        pack_count += 1
        return x

    # unpack hook shouldn't run during compilation, while we trace the forward
    def unpack_hook(x):
        nonlocal unpack_count
        unpack_count += 1
        return x

    x = torch.ones(4, requires_grad=True)
    y = torch.ones(4, requires_grad=False)

    with torch.autograd.graph.saved_tensors_hooks(pack_hook, unpack_hook):
        out_test = f(x, y)
        # Only x (the tensor requiring grad) is saved -> one pack.
        self.assertEqual(pack_count, 1)
        self.assertEqual(unpack_count, 0)
        out_test.sum().backward()
        self.assertEqual(pack_count, 1)
        self.assertEqual(unpack_count, 1)
def test_saved_tensors_hook_version_counter_not_shared(self):
    """When a pack hook substitutes a different tensor, mutating the
    unpacked tensor in backward must not bump the original tensor's
    version counter."""

    class Test(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x.sin()

        @staticmethod
        def backward(ctx, grad_output):
            (x,) = ctx.saved_tensors
            before = a._version
            # Mutates the replacement tensor, not `a`.
            x.add_(1)
            self.assertEqual(a._version, before)
            return grad_output

    a = torch.tensor(1.0, requires_grad=True)
    a_replacement = a.clone()

    def pack_hook(x):
        return a_replacement

    def unpack_hook(x):
        return x

    with torch.autograd.graph.saved_tensors_hooks(pack_hook, unpack_hook):
        b = Test.apply(a)

    b.backward()
def test_save_on_cpu_and_checkpoint(self):
    """save_on_cpu composed with activation checkpointing (in either
    nesting order) must produce the same gradients as the plain run."""
    a = torch.randn(2, 2, requires_grad=True)

    # Reference gradients without any hooks/checkpointing.
    b = a.pow(2).pow(2).pow(2).pow(2)
    b.sum().backward()
    b_grad = a.grad.clone()
    a.grad.zero_()

    # checkpoint inside save_on_cpu
    with torch.autograd.graph.save_on_cpu():
        h = a.pow(2)
        h = checkpoint(lambda x: x.pow(2).pow(2), h, use_reentrant=False)
        c = h.pow(2)
    c.sum().backward()
    c_grad = a.grad.clone()
    a.grad.zero_()

    # save_on_cpu inside checkpoint
    def f(a):
        h = a.pow(2)
        with torch.autograd.graph.save_on_cpu():
            h = h.pow(2).pow(2)
        return h.pow(2)

    d = checkpoint(f, a, use_reentrant=False)
    d.sum().backward()
    d_grad = a.grad.clone()

    self.assertEqual(b_grad, c_grad)
    self.assertEqual(b_grad, d_grad)
def test_pack_hook_with_inplace_modification_should_fail(self):
    """A pack hook that mutates its input in place must be rejected, both
    as a default hook and as a per-SavedVariable hook."""
    a = torch.randn(5, requires_grad=True)

    def inc(x):
        x += 1
        return x

    with torch.autograd.graph.saved_tensors_hooks(inc, lambda x: x):
        with self.assertRaisesRegex(
            RuntimeError,
            "A saved tensor pack hook is modifying its input in place.",
        ):
            y = torch.exp(a)

    y = torch.exp(a)
    with self.assertRaisesRegex(
        RuntimeError, "A saved tensor pack hook is modifying its input in place."
    ):
        y.grad_fn._raw_saved_result.register_hooks(inc, lambda x: x)
def test_saving_variable_to_disk(self):
    """Pack/unpack hooks may round-trip saved tensors through disk
    (pack returns a file path, unpack loads it) without affecting grads."""
    with tempfile.TemporaryDirectory() as tmp_dir:

        def pack(x):
            name = os.path.join(tmp_dir, str(uuid.uuid4()))
            torch.save(x, name)
            return name

        def unpack(name):
            return torch.load(name)

        with torch.autograd.graph.saved_tensors_hooks(pack, unpack):
            a = torch.ones(5, requires_grad=True)
            y = a * a
            self.assertEqual(a, y.grad_fn._saved_self)

            y.sum().backward()
            self.assertEqual(2 * a, a.grad)
def test_default_saved_tensors_hooks_double_backward(self):
    """Scaling default hooks compound through double backward: the factor
    depends on which saves (first and/or second backward) happen inside
    the hook context."""
    with torch.autograd.graph.saved_tensors_hooks(lambda x: x, lambda x: x):
        a = torch.randn(5, requires_grad=True)
        y = a**3
        s = torch.sum(y)
    (g,) = torch.autograd.grad(s, (a,), create_graph=True)
    g.sum().backward()
    self.assertEqual(6 * a, a.grad)

    with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
        a = torch.randn(5, requires_grad=True)
        y = a**3
        s = torch.sum(y)
    (g,) = torch.autograd.grad(s, (a,), create_graph=True)
    g.sum().backward()
    # factor 2 because only a is saved once
    self.assertEqual(6 * 2 * a, a.grad)

    a = torch.randn(5, requires_grad=True)
    y = a**3
    s = torch.sum(y)
    with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
        (g,) = torch.autograd.grad(s, (a,), create_graph=True)
        g.sum().backward()
        # factor 4 because pow_backward is grad * (exp * self.pow(exp - 1))
        # so grad is saved and self (i.e. a) is saved
        self.assertEqual(6 * 4 * a, a.grad)

    with torch.autograd.graph.saved_tensors_hooks(lambda x: 2 * x, lambda x: x):
        a = torch.randn(5, requires_grad=True)
        y = a**3
        s = torch.sum(y)
        (g,) = torch.autograd.grad(s, (a,), create_graph=True)
        g.sum().backward()
        # combining the two above blocks: 2 * 4 = 8
        # note that in that sense, a is saved twice
        self.assertEqual(6 * 8 * a, a.grad)
def test_wrapped_number_saved_tensors_hooks(self):
def err_hook(x):
raise RuntimeError("this hook should not be called")
with torch.autograd.graph.saved_tensors_hooks(err_hook, err_hook):
a = torch.randn(5, requires_grad=True)
out = (a * 3).sum()
# 3 is saved as a saved tensor because it is a wrapped number, but
# wrapped numbers should be special cased to not trigger saved variable hooks
torch.autograd.grad(out, (a,))
def test_graph_save_on_cpu(self):
    """save_on_cpu (with and without pin_memory) preserves saved-tensor
    identity, dtype, and layout, and yields correct gradients for dense,
    double, and sparse inputs."""

    def test(get_input, cuda, pin_memory):
        with torch.autograd.graph.save_on_cpu(pin_memory):
            a = get_input()
            if cuda:
                # NOTE(review): the result of .cuda() is discarded, so `a`
                # stays on its original device — confirm this is intended.
                a.cuda()
            y = a * a
            self.assertEqual(a, y.grad_fn._saved_self)
            self.assertEqual(a, y.grad_fn._saved_other)
            self.assertEqual(a.dtype, y.grad_fn._saved_self.dtype)
            self.assertEqual(a.layout, y.grad_fn._saved_self.layout)
            if y.is_sparse:
                y = y.to_dense()
            y.sum().backward()

            actual = 2 * a
            expected = a.grad
            if a.is_sparse:
                actual = actual.coalesce()
                expected = expected.coalesce()

            self.assertEqual(actual, expected)

    for cuda in [False] + ([True] if torch.cuda.is_available() else []):
        for pin_memory in [True, False]:
            # FloatTensor
            test(lambda: torch.randn(5, requires_grad=True), cuda, pin_memory)
            # DoubleTensor
            test(
                lambda: torch.randn(5, requires_grad=True, dtype=torch.double),
                cuda,
                pin_memory,
            )
            # Sparse tensor
            x = torch.sparse_coo_tensor(
                torch.tensor([[1, 1]]).long(),
                torch.tensor([1.0, 1.0]),
                requires_grad=True,
            )
            test(lambda: x, cuda, pin_memory)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_graph_save_on_cpu_cuda(self):
    """With save_on_cpu active, CUDA memory held for the graph should
    match the no-grad baseline (saved activations live on CPU)."""

    def f(x):
        a = x + 1
        return a * a

    # with grad
    a = torch.ones(1, requires_grad=True, device="cuda")
    y = f(a)
    memory_with_grad = torch.cuda.memory_allocated()

    del a
    del y

    # without grad
    a = torch.ones(1, requires_grad=True, device="cuda")
    with torch.no_grad():
        y = f(a)
    memory_without_grad = torch.cuda.memory_allocated()

    self.assertGreater(memory_with_grad, memory_without_grad)

    del a
    del y

    # with hooks
    with torch.autograd.graph.save_on_cpu():
        a = torch.ones(1, requires_grad=True, device="cuda")
        y = f(a)

        memory_with_hooks = torch.cuda.memory_allocated()
        self.assertEqual(memory_with_hooks, memory_without_grad)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_scalar_grad_mixed_device(self):
    """Backward through an op mixing a CPU scalar and a CUDA tensor must
    not error (smoke test)."""
    x = torch.tensor(1.0, requires_grad=True)
    y = torch.randn(2, 2, device="cuda")
    out = x * y
    out.sum().backward()
@scoped_load_inline
def test_multi_grad_all_hooks(self, load_inline):
    """register_multi_grad_hook (mode 'all') fires once per backward that
    reaches any watched tensor, across all node kinds: C++ nodes, Python
    custom Functions, UndefinedGrad, and C++ custom ops."""
    t1 = torch.rand(2, requires_grad=True)
    t2 = torch.rand(2, requires_grad=True)
    t3 = torch.rand(2, requires_grad=True)
    t4 = torch.rand(2, requires_grad=True)

    # Ensure we properly detect all types of Nodes here
    # C++ Node
    t1 = t1.mul(2)

    # Python custom Function
    class Foo(Function):
        @staticmethod
        def forward(ctx, a):
            return a.clone()

        @staticmethod
        def backward(ctx, gO):
            return gO

    t2 = Foo.apply(t2)

    # C++ Node
    t3 = torch._C._functions.UndefinedGrad()(t3)

    # C++ Custom Op
    cpp_source = """
struct CustomOpAutogradFunction : public torch::autograd::Function<CustomOpAutogradFunction> {
  static torch::Tensor forward(
      torch::autograd::AutogradContext* ctx,
      const torch::Tensor& x) {
    return x.clone();
  }

  static torch::autograd::variable_list backward(
      torch::autograd::AutogradContext *ctx,
      torch::autograd::variable_list grad_output) {
    return grad_output;
  }
};

torch::Tensor custom_op_backed_by_autograd_fn(torch::Tensor x) {
  return CustomOpAutogradFunction::apply(x);
}

TORCH_LIBRARY(test_multigrad_all_hooks, m) {
    m.def("custom_op_backed_by_autograd_fn", custom_op_backed_by_autograd_fn);
}
"""

    module = load_inline(
        name="test_multigrad_all_hooks",
        cpp_sources=cpp_source,
        functions="custom_op_backed_by_autograd_fn",
        verbose=True,
    )

    t4 = torch.ops.test_multigrad_all_hooks.custom_op_backed_by_autograd_fn(t4)

    res = [None] * 4
    count = [0]

    def hook(grads):
        nonlocal res
        count[0] += 1
        res = [g is not None for g in grads]

    handle = torch.autograd.graph.register_multi_grad_hook((t1, t2, t3, t4), hook)

    out = t2 * t3

    # Backward reaching t2/t3 fires the hook; t1/t4 get no grads.
    out.sum().backward(inputs=(t2, t3), retain_graph=True)
    self.assertEqual(count[0], 1)
    self.assertEqual(res, [False, True, True, False])

    # Backward that reaches none of the watched tensors does not fire.
    out.sum().backward(inputs=(t1, t4), retain_graph=True)
    self.assertEqual(count[0], 1)

    out.sum().backward(inputs=(t1, t3), retain_graph=True)
    self.assertEqual(count[0], 2)
    self.assertEqual(res, [False, False, True, False])

    class Func(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        def backward(ctx, gO):
            raise RuntimeError("error message")

    out = Func.apply(t2) * t3
    with self.assertRaisesRegex(RuntimeError, "error message"):
        out.sum().backward(inputs=(t2, t3), retain_graph=True)
    # A backward that errors must not fire the hook.
    self.assertEqual(count[0], 2)

    handle.remove()
    out.sum().backward(inputs=(t1, t3), retain_graph=True)
    self.assertEqual(count[0], 2)
def test_multi_grad_any_hooks(self):
    """register_multi_grad_hook (mode 'any') fires exactly once per
    backward, before the regular tensor hooks of the same module, and not
    at all once removed."""
    hook_id = 0
    any_hook_handles: list[RemovableHandle] = []

    class MultiOutputModule(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.lin = nn.Linear(3, 3)

        def forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
            z = self.lin(x)
            out = torch.sin(z), torch.cos(z)
            nonlocal hook_id
            # Regular tensor hook gets the even ID ...
            z.register_hook(partial(hook, hook_id))
            hook_id += 1
            # ... and the 'any' multi-grad hook gets the odd ID.
            any_hook_handles.append(
                torch.autograd.graph.register_multi_grad_hook(
                    out, partial(hook, hook_id), mode="any"
                )
            )
            hook_id += 1
            return out

    class Model(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.mod1 = MultiOutputModule()
            self.mod2 = MultiOutputModule()

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            y = self.mod1(x)
            z = y[0] + y[1]
            return self.mod2(z)

    hook_order: list[int] = []
    hook_count = 0

    def hook(hook_id: int, *unused):
        nonlocal hook_count
        nonlocal hook_order
        hook_count += 1
        hook_order.append(hook_id)

    # Any hooks: IDs 1 and 3; regular hooks: IDs 0 and 2
    model = Model()
    inp = torch.randn((2, 3))
    out = model(inp)
    (out[0] + out[1]).sum().backward()
    # Check that the any-hook runs only once and before the regular hook
    # for each module
    self.assertEqual(len(any_hook_handles), 2)
    self.assertEqual(hook_order, [3, 2, 1, 0])

    hook_id = 0
    hook_order.clear()
    any_hook_handles.clear()
    out = model(inp)
    for handle in any_hook_handles:
        handle.remove()
    (out[0] + out[1]).sum().backward()
    # Check that the any-hook does not run if removed
    self.assertEqual(hook_order, [2, 0])
def test_multi_grad_hooks_invalid_mode(self):
    """register_multi_grad_hook rejects any mode other than 'all'/'any'."""
    watched = (
        torch.rand(2, requires_grad=True),
        torch.rand(2, requires_grad=True),
    )
    expected = r"Expects mode to be one of \('all', 'any'\) but got foo"
    with self.assertRaisesRegex(ValueError, expected):
        torch.autograd.graph.register_multi_grad_hook(
            watched, lambda _: None, mode="foo"
        )
def test_pynode_destruction_deadlock(self):
script = """
import torch
| MyFunction |
python | getsentry__sentry | src/sentry/rules/conditions/event_frequency.py | {
"start": 4538,
"end": 15793
} | class ____(EventCondition, abc.ABC):
intervals = STANDARD_INTERVALS
def __init__(
    self,
    # Data specifically takes on this typeddict form for the
    # Event Frequency condition classes.
    data: EventFrequencyConditionData | None = None,
    *args: Any,
    **kwargs: Any,
) -> None:
    """Set up the tsdb backend (overridable via kwargs, e.g. for tests)
    and build the rule form fields, with interval choices sorted by
    duration."""
    self.tsdb = kwargs.pop("tsdb", tsdb)
    self.form_fields = {
        "value": {"type": "number", "placeholder": 100},
        "interval": {
            "type": "choice",
            "choices": [
                (key, label)
                for key, (label, duration) in sorted(
                    self.intervals.items(),
                    # Sort choices by interval duration; the parameter name
                    # is an artifact of an automated tuple-arg rewrite.
                    key=lambda key____label__duration: key____label__duration[1][1],
                )
            ],
        },
    }
    kwargs["data"] = data
    super().__init__(*args, **kwargs)
def _get_options(self) -> tuple[str | None, float | None]:
    """Return the configured ``(interval, threshold)`` pair, or Nones when
    the rule options are missing or malformed."""
    interval: str | None = None
    threshold: float | None = None
    try:
        interval = self.get_option("interval")
        threshold = float(self.get_option("value"))
    except (TypeError, ValueError):
        # Unset or non-numeric options leave the defaults in place.
        pass
    return interval, threshold
def passes(self, event: GroupEvent, state: EventState) -> bool:
interval, value = self._get_options()
if not (interval and value is not None):
return False
# Assumes that the first event in a group will always be below the threshold.
if state.is_new and value > 1:
return False
comparison_type = self.get_option("comparisonType", ComparisonType.COUNT)
comparison_interval_option = self.get_option(
"comparisonInterval", DEFAULT_COMPARISON_INTERVAL
)
if comparison_interval_option == "":
return False
comparison_interval = COMPARISON_INTERVALS[comparison_interval_option][1]
_, duration = self.intervals[interval]
current_value = self.get_rate(duration=duration, comparison_interval=comparison_interval, event=event, environment_id=self.rule.environment_id, comparison_type=comparison_type) # type: ignore[union-attr]
logging.info("event_frequency_rule current: %s, threshold: %s", current_value, value)
return current_value > value
def passes_activity_frequency(
self, activity: ConditionActivity, buckets: dict[datetime, int]
) -> bool:
interval, value = self._get_options()
if not (interval and value is not None):
return False
interval_delta = self.intervals[interval][1]
comparison_type = self.get_option("comparisonType", ComparisonType.COUNT)
# extrapolate if interval less than bucket size
# if comparing percent increase, both intervals will be increased, so do not extrapolate value
if interval_delta < FREQUENCY_CONDITION_BUCKET_SIZE:
if comparison_type != ComparisonType.PERCENT:
value *= int(FREQUENCY_CONDITION_BUCKET_SIZE / interval_delta)
interval_delta = FREQUENCY_CONDITION_BUCKET_SIZE
result = bucket_count(activity.timestamp - interval_delta, activity.timestamp, buckets)
if comparison_type == ComparisonType.PERCENT:
comparison_interval = COMPARISON_INTERVALS[self.get_option("comparisonInterval")][1]
comparison_end = activity.timestamp - comparison_interval
comparison_result = bucket_count(
comparison_end - interval_delta, comparison_end, buckets
)
result = percent_increase(result, comparison_result)
return result > value
def get_preview_aggregate(self) -> tuple[str, str]:
raise NotImplementedError
def query(
self, event: GroupEvent, start: datetime, end: datetime, environment_id: int
) -> int | float:
"""
Queries Snuba for a unique condition for a single group.
"""
return self.query_hook(event, start, end, environment_id)
def query_hook(
self,
event: GroupEvent,
start: datetime,
end: datetime,
environment_id: int,
) -> int | float:
"""
Abstract method that specifies how to query Snuba for a single group
depending on the condition. Must be implemented by subclasses.
"""
raise NotImplementedError
def batch_query(
self, group_ids: set[int], start: datetime, end: datetime, environment_id: int
) -> dict[int, int | float]:
"""
Queries Snuba for a unique condition for multiple groups.
"""
return self.batch_query_hook(group_ids, start, end, environment_id, False)
def batch_query_hook(
self,
group_ids: set[int],
start: datetime,
end: datetime,
environment_id: int,
group_on_time: bool,
) -> dict[int, int | float]:
"""
Abstract method that specifies how to query Snuba for multiple groups
depending on the condition. Must be implemented by subclasses.
"""
raise NotImplementedError
def disable_consistent_snuba_mode(
self, duration: timedelta
) -> contextlib.AbstractContextManager[object]:
"""For conditions with interval >= 1 hour we don't need to worry about read your writes
consistency. Disable it so that we can scale to more nodes.
"""
option_override_cm: contextlib.AbstractContextManager[object] = contextlib.nullcontext()
if duration >= timedelta(hours=1):
option_override_cm = options_override({"consistent": False})
return option_override_cm
def get_query_window(self, end: datetime, duration: timedelta) -> tuple[datetime, datetime]:
"""
Calculate the start and end times for the query.
"duration" is the length of the window we're querying over.
"""
start = end - duration
return (start, end)
def get_rate(
self,
duration: timedelta,
comparison_interval: timedelta,
event: GroupEvent,
environment_id: int,
comparison_type: str,
) -> int | float:
current_time = timezone.now()
start, end = self.get_query_window(end=current_time, duration=duration)
with self.disable_consistent_snuba_mode(duration):
result = self.query(event, start, end, environment_id=environment_id)
if comparison_type == ComparisonType.PERCENT:
# TODO: Figure out if there's a way we can do this less frequently. All queries are
# automatically cached for 10s. We could consider trying to cache this and the main
# query for 20s to reduce the load.
current_time -= comparison_interval
start, end = self.get_query_window(end=current_time, duration=duration)
comparison_result = self.query(event, start, end, environment_id=environment_id)
result = percent_increase(result, comparison_result)
return result
def get_rate_bulk(
self,
duration: timedelta,
group_ids: set[int],
environment_id: int,
current_time: datetime,
comparison_interval: timedelta | None,
) -> dict[int, int | float]:
"""
Make a batch query for multiple groups. The return value is a dictionary
of group_id to the result for that group.
If comparison_interval is not None, we're making the second query in a
percent comparison condition. For example, if the condition is:
- num of issues is {}% higher in 1 hr compared to 5 min ago
The second query would be querying for num of events from:
- 5 min ago to 1 hr 5 min ago
"""
if comparison_interval:
current_time -= comparison_interval
start, end = self.get_query_window(end=current_time, duration=duration)
with self.disable_consistent_snuba_mode(duration):
result = self.batch_query(
group_ids=group_ids,
start=start,
end=end,
environment_id=environment_id,
)
return result
def get_snuba_query_result(
self,
tsdb_function: Callable[..., Any],
keys: list[int],
group_id: int,
organization_id: int,
model: TSDBModel,
start: datetime,
end: datetime,
environment_id: int,
referrer_suffix: str,
group_on_time: bool = False,
project_ids: list[int] | None = None,
) -> Mapping[int, int]:
result: Mapping[int, int] = tsdb_function(
model=model,
keys=keys,
start=start,
end=end,
environment_id=environment_id,
use_cache=True,
jitter_value=group_id,
tenant_ids={"organization_id": organization_id},
referrer_suffix=referrer_suffix,
group_on_time=group_on_time,
project_ids=project_ids,
)
return result
def get_chunked_result(
self,
tsdb_function: Callable[..., Any],
model: TSDBModel,
group_ids: list[int],
organization_id: int,
start: datetime,
end: datetime,
environment_id: int,
referrer_suffix: str,
group_on_time: bool = False,
project_ids: list[int] | None = None,
) -> dict[int, int]:
batch_totals: dict[int, int] = defaultdict(int)
group_id = group_ids[0]
for group_chunk in chunked(group_ids, SNUBA_LIMIT):
result = self.get_snuba_query_result(
tsdb_function=tsdb_function,
model=model,
keys=[group_id for group_id in group_chunk],
group_id=group_id,
organization_id=organization_id,
start=start,
end=end,
environment_id=environment_id,
referrer_suffix=referrer_suffix,
group_on_time=group_on_time,
project_ids=project_ids,
)
batch_totals.update(result)
return batch_totals
def get_error_and_generic_group_ids(
self,
groups: QuerySet[Group, _QSTypedDict],
) -> tuple[list[int], list[int]]:
"""
Separate group ids into error group ids and generic group ids
"""
generic_issue_ids = []
error_issue_ids = []
for group in groups:
if group["type"] == DEFAULT_TYPE_ID:
error_issue_ids.append(group["id"])
else:
generic_issue_ids.append(group["id"])
return (error_issue_ids, generic_issue_ids)
def get_value_from_groups(
self,
groups: QuerySet[Group, _QSTypedDict] | None,
value: Literal["id", "project_id", "project__organization_id"],
) -> int | None:
result = None
if groups:
group = groups[0]
result = group.get(value)
return result
def get_form_instance(self) -> EventFrequencyForm:
return EventFrequencyForm(self.data)
| BaseEventFrequencyCondition |
python | Textualize__textual | docs/examples/guide/styles/colors.py | {
"start": 80,
"end": 412
} | class ____(App):
def compose(self) -> ComposeResult:
self.widget = Static("Textual")
yield self.widget
def on_mount(self) -> None:
self.widget.styles.background = "darkblue"
self.widget.styles.border = ("heavy", "white")
if __name__ == "__main__":
app = WidgetApp()
app.run()
| WidgetApp |
python | PrefectHQ__prefect | src/prefect/input/run_input.py | {
"start": 16427,
"end": 22932
} | class ____(Generic[T]):
def __init__(
self,
run_input_cls: Type[AutomaticRunInput[T]],
key_prefix: str,
timeout: float | None = 3600,
poll_interval: float = 10,
raise_timeout_error: bool = False,
exclude_keys: Optional[Set[str]] = None,
flow_run_id: Optional[UUID] = None,
with_metadata: bool = False,
):
self.run_input_cls: Type[AutomaticRunInput[T]] = run_input_cls
self.key_prefix: str = key_prefix
self.timeout: float | None = timeout
self.poll_interval: float = poll_interval
self.exclude_keys: set[str] = set()
self.raise_timeout_error: bool = raise_timeout_error
self.flow_run_id: UUID = ensure_flow_run_id(flow_run_id)
self.with_metadata = with_metadata
if exclude_keys is not None:
self.exclude_keys.update(exclude_keys)
def __iter__(self) -> Self:
return self
def __next__(self) -> T | AutomaticRunInput[T]:
try:
not_coro = self.next()
if TYPE_CHECKING:
assert not isinstance(not_coro, Coroutine)
return not_coro
except TimeoutError:
if self.raise_timeout_error:
raise
raise StopIteration
def __aiter__(self) -> Self:
return self
async def __anext__(self) -> Union[T, AutomaticRunInput[T]]:
try:
coro = self.next()
if TYPE_CHECKING:
assert inspect.iscoroutine(coro)
return cast(Union[T, AutomaticRunInput[T]], await coro)
except TimeoutError:
if self.raise_timeout_error:
raise
raise StopAsyncIteration
async def filter_for_inputs(self) -> list["FlowRunInput"]:
flow_run_inputs_coro = filter_flow_run_input(
key_prefix=self.key_prefix,
limit=1,
exclude_keys=self.exclude_keys,
flow_run_id=self.flow_run_id,
)
if TYPE_CHECKING:
assert inspect.iscoroutine(flow_run_inputs_coro)
flow_run_inputs = await flow_run_inputs_coro
if flow_run_inputs:
self.exclude_keys.add(*[i.key for i in flow_run_inputs])
return flow_run_inputs
@sync_compatible
async def next(self) -> Union[T, AutomaticRunInput[T]]:
flow_run_inputs = await self.filter_for_inputs()
if flow_run_inputs:
return self.to_instance(flow_run_inputs[0])
with anyio.fail_after(self.timeout):
while True:
await anyio.sleep(self.poll_interval)
flow_run_inputs = await self.filter_for_inputs()
if flow_run_inputs:
return self.to_instance(flow_run_inputs[0])
def to_instance(
self, flow_run_input: "FlowRunInput"
) -> Union[T, AutomaticRunInput[T]]:
run_input = self.run_input_cls.load_from_flow_run_input(flow_run_input)
if self.with_metadata:
return run_input
return run_input.value
async def _send_input(
flow_run_id: UUID,
run_input: RunInput | pydantic.BaseModel,
sender: Optional[str] = None,
key_prefix: Optional[str] = None,
):
_run_input: Union[RunInput, AutomaticRunInput[Any]]
if isinstance(run_input, RunInput):
_run_input = run_input
else:
input_cls: Type[AutomaticRunInput[Any]] = run_input_subclass_from_type(
type(run_input)
)
_run_input = input_cls(value=run_input)
if key_prefix is None:
key_prefix = f"{_run_input.__class__.__name__.lower()}-auto"
key = f"{key_prefix}-{uuid4()}"
coro = create_flow_run_input_from_model(
key=key, flow_run_id=flow_run_id, model_instance=_run_input, sender=sender
)
if TYPE_CHECKING:
assert inspect.iscoroutine(coro)
await coro
@sync_compatible
async def send_input(
run_input: Any,
flow_run_id: UUID,
sender: Optional[str] = None,
key_prefix: Optional[str] = None,
):
await _send_input(
flow_run_id=flow_run_id,
run_input=run_input,
sender=sender,
key_prefix=key_prefix,
)
@overload
def receive_input( # type: ignore[overload-overlap]
input_type: Union[Type[R], pydantic.BaseModel],
timeout: Optional[float] = 3600,
poll_interval: float = 10,
raise_timeout_error: bool = False,
exclude_keys: Optional[Set[str]] = None,
key_prefix: Optional[str] = None,
flow_run_id: Optional[UUID] = None,
with_metadata: bool = False,
) -> GetInputHandler[R]: ...
@overload
def receive_input(
input_type: Type[T],
timeout: Optional[float] = 3600,
poll_interval: float = 10,
raise_timeout_error: bool = False,
exclude_keys: Optional[Set[str]] = None,
key_prefix: Optional[str] = None,
flow_run_id: Optional[UUID] = None,
with_metadata: bool = False,
) -> GetAutomaticInputHandler[T]: ...
def receive_input(
input_type: Union[Type[R], Type[T], pydantic.BaseModel],
timeout: Optional[float] = 3600,
poll_interval: float = 10,
raise_timeout_error: bool = False,
exclude_keys: Optional[Set[str]] = None,
key_prefix: Optional[str] = None,
flow_run_id: Optional[UUID] = None,
with_metadata: bool = False,
) -> Union[GetAutomaticInputHandler[T], GetInputHandler[R]]:
# The typing in this module is a bit complex, and at this point `mypy`
# thinks that `run_input_subclass_from_type` accepts a `Type[Never]` but
# the signature is the same as here:
# Union[Type[R], Type[T], pydantic.BaseModel],
# Seems like a possible mypy bug, so we'll ignore the type check here.
input_cls: Union[Type[AutomaticRunInput[T]], Type[R]] = (
run_input_subclass_from_type(input_type)
) # type: ignore[arg-type]
if issubclass(input_cls, AutomaticRunInput):
return input_cls.receive(
timeout=timeout,
poll_interval=poll_interval,
raise_timeout_error=raise_timeout_error,
exclude_keys=exclude_keys,
key_prefix=key_prefix,
flow_run_id=flow_run_id,
with_metadata=with_metadata,
)
else:
return input_cls.receive(
timeout=timeout,
poll_interval=poll_interval,
raise_timeout_error=raise_timeout_error,
exclude_keys=exclude_keys,
key_prefix=key_prefix,
flow_run_id=flow_run_id,
)
| GetAutomaticInputHandler |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_results.py | {
"start": 9030,
"end": 13228
} | class ____(fixtures.TablesTest):
"""tests using percent signs, spaces in table and column names.
This didn't work for PostgreSQL / MySQL drivers for a long time
but is now supported.
"""
__requires__ = ("percent_schema_names",)
__backend__ = True
@classmethod
def define_tables(cls, metadata):
cls.tables.percent_table = Table(
"percent%table",
metadata,
Column("percent%", Integer),
Column("spaces % more spaces", Integer),
)
cls.tables.lightweight_percent_table = sql.table(
"percent%table",
sql.column("percent%"),
sql.column("spaces % more spaces"),
)
def test_single_roundtrip(self, connection):
percent_table = self.tables.percent_table
for params in [
{"percent%": 5, "spaces % more spaces": 12},
{"percent%": 7, "spaces % more spaces": 11},
{"percent%": 9, "spaces % more spaces": 10},
{"percent%": 11, "spaces % more spaces": 9},
]:
connection.execute(percent_table.insert(), params)
self._assert_table(connection)
def test_executemany_roundtrip(self, connection):
percent_table = self.tables.percent_table
connection.execute(
percent_table.insert(), {"percent%": 5, "spaces % more spaces": 12}
)
connection.execute(
percent_table.insert(),
[
{"percent%": 7, "spaces % more spaces": 11},
{"percent%": 9, "spaces % more spaces": 10},
{"percent%": 11, "spaces % more spaces": 9},
],
)
self._assert_table(connection)
@requirements.insert_executemany_returning
def test_executemany_returning_roundtrip(self, connection):
percent_table = self.tables.percent_table
connection.execute(
percent_table.insert(), {"percent%": 5, "spaces % more spaces": 12}
)
result = connection.execute(
percent_table.insert().returning(
percent_table.c["percent%"],
percent_table.c["spaces % more spaces"],
),
[
{"percent%": 7, "spaces % more spaces": 11},
{"percent%": 9, "spaces % more spaces": 10},
{"percent%": 11, "spaces % more spaces": 9},
],
)
eq_(result.all(), [(7, 11), (9, 10), (11, 9)])
self._assert_table(connection)
def _assert_table(self, conn):
percent_table = self.tables.percent_table
lightweight_percent_table = self.tables.lightweight_percent_table
for table in (
percent_table,
percent_table.alias(),
lightweight_percent_table,
lightweight_percent_table.alias(),
):
eq_(
list(
conn.execute(table.select().order_by(table.c["percent%"]))
),
[(5, 12), (7, 11), (9, 10), (11, 9)],
)
eq_(
list(
conn.execute(
table.select()
.where(table.c["spaces % more spaces"].in_([9, 10]))
.order_by(table.c["percent%"])
)
),
[(9, 10), (11, 9)],
)
row = conn.execute(
table.select().order_by(table.c["percent%"])
).first()
eq_(row._mapping["percent%"], 5)
eq_(row._mapping["spaces % more spaces"], 12)
eq_(row._mapping[table.c["percent%"]], 5)
eq_(row._mapping[table.c["spaces % more spaces"]], 12)
conn.execute(
percent_table.update().values(
{percent_table.c["spaces % more spaces"]: 15}
)
)
eq_(
list(
conn.execute(
percent_table.select().order_by(
percent_table.c["percent%"]
)
)
),
[(5, 15), (7, 15), (9, 15), (11, 15)],
)
| PercentSchemaNamesTest |
python | django-extensions__django-extensions | tests/management/commands/error_raising_command.py | {
"start": 67,
"end": 205
} | class ____(LoggingBaseCommand):
help = "Test error"
def handle(self, *args, **options):
raise Exception("Test Error")
| Command |
python | getsentry__sentry | tests/sentry/sentry_apps/api/parsers/test_issue_link.py | {
"start": 201,
"end": 3987
} | class ____(unittest.TestCase):
def setUp(self) -> None:
self.schema: dict[str, Any] = {
"type": "issue-link",
"link": {
"uri": "/sentry/tasks/link",
"required_fields": [
{
"type": "select",
"name": "task_id",
"label": "Task ID",
"uri": "/sentry/tasks",
}
],
"optional_fields": [{"type": "text", "name": "owner", "label": "Owner"}],
},
"create": {
"uri": "/sentry/tasks/create",
"required_fields": [
{"type": "text", "name": "title", "label": "Title"},
{"type": "text", "name": "description", "label": "Description"},
{
"type": "select",
"uri": "/sentry/tasks/projects",
"name": "project_id",
"label": "Project",
},
{
"depends_on": ["project_id"],
"type": "select",
"uri": "/sentry/tasks/boards",
"name": "board_id",
"label": "Board",
},
],
"optional_fields": [{"type": "text", "name": "owner", "label": "Owner"}],
},
}
def test_valid_schema(self) -> None:
validate_component(self.schema)
@invalid_schema
def test_missing_create_fails(self) -> None:
del self.schema["create"]
validate_component(self.schema)
@invalid_schema
def test_missing_create_uri(self) -> None:
del self.schema["create"]["uri"]
validate_component(self.schema)
@invalid_schema
def test_missing_create_required_fields(self) -> None:
del self.schema["create"]["required_fields"]
validate_component(self.schema)
@invalid_schema
def test_create_required_fields_no_elements(self) -> None:
self.schema["create"]["required_fields"] = []
validate_component(self.schema)
@invalid_schema
def test_create_required_fields_invalid_element(self) -> None:
self.schema["create"]["required_fields"] = [{"type": "markdown"}]
validate_component(self.schema)
def test_missing_create_optional_fields(self) -> None:
del self.schema["create"]["optional_fields"]
validate_component(self.schema)
@invalid_schema
def test_create_optional_fields_invalid_element(self) -> None:
self.schema["create"]["optional_fields"] = [{"type": "markdown"}]
validate_component(self.schema)
@invalid_schema
def test_missing_link(self) -> None:
del self.schema["link"]
validate_component(self.schema)
@invalid_schema
def test_missing_link_uri(self) -> None:
del self.schema["link"]["uri"]
validate_component(self.schema)
@invalid_schema
def test_missing_link_required_fields(self) -> None:
del self.schema["link"]["required_fields"]
validate_component(self.schema)
def test_missing_link_optional_fields(self) -> None:
del self.schema["link"]["optional_fields"]
validate_component(self.schema)
@invalid_schema
def test_invalid_async_option(self) -> None:
self.schema["create"]["required_fields"][2]["async"] = "cat"
validate_component(self.schema)
@invalid_schema
def test_invalid_skip_load_on_open_option(self) -> None:
self.schema["create"]["required_fields"][2]["skip_load_on_open"] = "cat"
validate_component(self.schema)
| TestIssueLinkSchemaValidation |
python | wandb__wandb | wandb/sdk/lib/progress.py | {
"start": 7627,
"end": 10893
} | class ____:
"""Renders operation stats into lines of text."""
def __init__(
self,
printer: p.Printer,
max_lines: int,
loading_symbol: str,
) -> None:
self._printer = printer
self._max_lines = max_lines
self._loading_symbol = loading_symbol
self._lines: list[str] = []
self._ops_shown = 0
def render(self, stats: pb.OperationStats) -> list[str]:
"""Convert the stats into a list of lines to display.
Args:
stats: Collection of operations to display.
Returns:
The lines of text to print. The lines do not end with the newline
character. Returns an empty list if there are no operations.
"""
for op in stats.operations:
self._add_operation(op, is_subtask=False, indent="")
if self._ops_shown < stats.total_operations:
if 1 <= self._max_lines <= len(self._lines):
self._ops_shown -= 1
self._lines.pop()
remaining = stats.total_operations - self._ops_shown
self._lines.append(f"+ {remaining} more task(s)")
return self._lines
def _add_operation(self, op: pb.Operation, is_subtask: bool, indent: str) -> None:
"""Add the operation to `self._lines`."""
if len(self._lines) >= self._max_lines:
return
if not is_subtask:
self._ops_shown += 1
status_indent_level = 0 # alignment for the status message, if any
parts: list[str] = []
# Subtask indicator.
if is_subtask and self._printer.supports_unicode:
status_indent_level += 2 # +1 for space
parts.append("↳")
# Loading symbol.
if self._loading_symbol:
status_indent_level += 2 # +1 for space
parts.append(self._loading_symbol)
# Task name.
parts.append(op.desc)
# Progress information.
if op.progress:
parts.append(f"{op.progress}")
# Task duration.
parts.append(f"({_time_to_string(seconds=op.runtime_seconds)})")
# Error status.
self._lines.append(indent + " ".join(parts))
if op.error_status:
error_word = self._printer.error("ERROR")
error_desc = self._printer.secondary_text(op.error_status)
status_indent = " " * status_indent_level
self._lines.append(
f"{indent}{status_indent}{error_word} {error_desc}",
)
# Subtasks.
if op.subtasks:
subtask_indent = indent + _INDENT
for task in op.subtasks:
self._add_operation(
task,
is_subtask=True,
indent=subtask_indent,
)
def _time_to_string(seconds: float) -> str:
"""Returns a short string representing the duration."""
if seconds < 10:
return f"{seconds:.1f}s"
if seconds < 60:
return f"{seconds:.0f}s"
if seconds < 60 * 60:
minutes = seconds / 60
return f"{minutes:.1f}m"
hours = int(seconds / (60 * 60))
minutes = int((seconds / 60) % 60)
return f"{hours}h{minutes}m"
| _OperationStatsPrinter |
python | cherrypy__cherrypy | cherrypy/test/test_misc_tools.py | {
"start": 3363,
"end": 4088
} | class ____(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def testReferer(self):
self.getPage('/referer/accept')
self.assertErrorPage(403, 'Forbidden Referer header.')
self.getPage(
'/referer/accept',
headers=[('Referer', 'http://www.example.com/')],
)
self.assertStatus(200)
self.assertBody('Accepted!')
# Reject
self.getPage('/referer/reject')
self.assertStatus(200)
self.assertBody('Accepted!')
self.getPage(
'/referer/reject',
headers=[('Referer', 'http://www.example.com/')],
)
self.assertErrorPage(403, 'Forbidden Referer header.')
| RefererTest |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 51153,
"end": 54647
} | class ____(GeneratedAirbyteSource):
class AuthenticateViaGoogleOauth:
@public
def __init__(
self,
client_id: str,
client_secret: str,
refresh_token: str,
auth_type: Optional[str] = None,
access_token: Optional[str] = None,
):
self.auth_type = check.opt_str_param(auth_type, "auth_type")
self.client_id = check.str_param(client_id, "client_id")
self.client_secret = check.str_param(client_secret, "client_secret")
self.refresh_token = check.str_param(refresh_token, "refresh_token")
self.access_token = check.opt_str_param(access_token, "access_token")
class ServiceAccountKeyAuthentication:
@public
def __init__(self, credentials_json: str, auth_type: Optional[str] = None):
self.auth_type = check.opt_str_param(auth_type, "auth_type")
self.credentials_json = check.str_param(credentials_json, "credentials_json")
@public
def __init__(
self,
name: str,
credentials: Union[
"GoogleAnalyticsV4Source.AuthenticateViaGoogleOauth",
"GoogleAnalyticsV4Source.ServiceAccountKeyAuthentication",
],
start_date: str,
view_id: str,
custom_reports: Optional[str] = None,
window_in_days: Optional[int] = None,
):
"""Airbyte Source for Google Analytics V4.
Documentation can be found at https://docs.airbyte.com/integrations/sources/google-analytics-v4
Args:
name (str): The name of the destination.
credentials (Union[GoogleAnalyticsV4Source.AuthenticateViaGoogleOauth, GoogleAnalyticsV4Source.ServiceAccountKeyAuthentication]): Credentials for the service
start_date (str): The date in the format YYYY-MM-DD. Any data before this date will not be replicated.
view_id (str): The ID for the Google Analytics View you want to fetch data from. This can be found from the Google Analytics Account Explorer.
custom_reports (Optional[str]): A JSON array describing the custom reports you want to sync from Google Analytics. See the docs for more information about the exact format you can use to fill out this field.
window_in_days (Optional[int]): The time increment used by the connector when requesting data from the Google Analytics API. More information is available in the docs. The bigger this value is, the faster the sync will be, but the more likely that sampling will be applied to your data, potentially causing inaccuracies in the returned results. We recommend setting this to 1 unless you have a hard requirement to make the sync faster at the expense of accuracy. The minimum allowed value for this field is 1, and the maximum is 364.
"""
self.credentials = check.inst_param(
credentials,
"credentials",
(
GoogleAnalyticsV4Source.AuthenticateViaGoogleOauth,
GoogleAnalyticsV4Source.ServiceAccountKeyAuthentication,
),
)
self.start_date = check.str_param(start_date, "start_date")
self.view_id = check.str_param(view_id, "view_id")
self.custom_reports = check.opt_str_param(custom_reports, "custom_reports")
self.window_in_days = check.opt_int_param(window_in_days, "window_in_days")
super().__init__("Google Analytics V4", name)
| GoogleAnalyticsV4Source |
python | pypa__virtualenv | src/virtualenv/discovery/py_spec.py | {
"start": 252,
"end": 5034
} | class ____:
"""Contains specification about a Python Interpreter."""
def __init__( # noqa: PLR0913
self,
str_spec: str,
implementation: str | None,
major: int | None,
minor: int | None,
micro: int | None,
architecture: int | None,
path: str | None,
*,
free_threaded: bool | None = None,
) -> None:
self.str_spec = str_spec
self.implementation = implementation
self.major = major
self.minor = minor
self.micro = micro
self.free_threaded = free_threaded
self.architecture = architecture
self.path = path
@classmethod
def from_string_spec(cls, string_spec: str): # noqa: C901, PLR0912
impl, major, minor, micro, threaded, arch, path = None, None, None, None, None, None, None
if os.path.isabs(string_spec): # noqa: PLR1702
path = string_spec
else:
ok = False
match = re.match(PATTERN, string_spec)
if match:
def _int_or_none(val):
return None if val is None else int(val)
try:
groups = match.groupdict()
version = groups["version"]
if version is not None:
versions = tuple(int(i) for i in version.split(".") if i)
if len(versions) > 3: # noqa: PLR2004
raise ValueError # noqa: TRY301
if len(versions) == 3: # noqa: PLR2004
major, minor, micro = versions
elif len(versions) == 2: # noqa: PLR2004
major, minor = versions
elif len(versions) == 1:
version_data = versions[0]
major = int(str(version_data)[0]) # first digit major
if version_data > 9: # noqa: PLR2004
minor = int(str(version_data)[1:])
threaded = bool(groups["threaded"])
ok = True
except ValueError:
pass
else:
impl = groups["impl"]
if impl in {"py", "python"}:
impl = None
arch = _int_or_none(groups["arch"])
if not ok:
path = string_spec
return cls(string_spec, impl, major, minor, micro, arch, path, free_threaded=threaded)
def generate_re(self, *, windows: bool) -> re.Pattern:
"""Generate a regular expression for matching against a filename."""
version = r"{}(\.{}(\.{})?)?".format(
*(r"\d+" if v is None else v for v in (self.major, self.minor, self.micro))
)
impl = "python" if self.implementation is None else f"python|{re.escape(self.implementation)}"
mod = "t?" if self.free_threaded else ""
suffix = r"\.exe" if windows else ""
version_conditional = (
"?"
# Windows Python executables are almost always unversioned
if windows
# Spec is an empty string
or self.major is None
else ""
)
# Try matching `direct` first, so the `direct` group is filled when possible.
return re.compile(
rf"(?P<impl>{impl})(?P<v>{version}{mod}){version_conditional}{suffix}$",
flags=re.IGNORECASE,
)
@property
def is_abs(self):
return self.path is not None and os.path.isabs(self.path)
def satisfies(self, spec):
"""Called when there's a candidate metadata spec to see if compatible - e.g. PEP-514 on Windows."""
if spec.is_abs and self.is_abs and self.path != spec.path:
return False
if spec.implementation is not None and spec.implementation.lower() != self.implementation.lower():
return False
if spec.architecture is not None and spec.architecture != self.architecture:
return False
if spec.free_threaded is not None and spec.free_threaded != self.free_threaded:
return False
for our, req in zip((self.major, self.minor, self.micro), (spec.major, spec.minor, spec.micro)):
if req is not None and our is not None and our != req:
return False
return True
def __repr__(self) -> str:
name = type(self).__name__
params = "implementation", "major", "minor", "micro", "architecture", "path", "free_threaded"
return f"{name}({', '.join(f'{k}={getattr(self, k)}' for k in params if getattr(self, k) is not None)})"
__all__ = [
"PythonSpec",
]
| PythonSpec |
python | django__django | tests/files/tests.py | {
"start": 9810,
"end": 10522
} | class ____(unittest.TestCase):
def test_extension_kept(self):
"""The temporary file name has the same suffix as the original file."""
with TemporaryUploadedFile("test.txt", "text/plain", 1, "utf8") as temp_file:
self.assertTrue(temp_file.file.name.endswith(".upload.txt"))
def test_file_upload_temp_dir_pathlib(self):
with tempfile.TemporaryDirectory() as tmp_dir:
with override_settings(FILE_UPLOAD_TEMP_DIR=Path(tmp_dir)):
with TemporaryUploadedFile(
"test.txt", "text/plain", 1, "utf-8"
) as temp_file:
self.assertTrue(os.path.exists(temp_file.file.name))
| TemporaryUploadedFileTests |
python | doocs__leetcode | solution/0400-0499/0470.Implement Rand10() Using Rand7()/Solution.py | {
"start": 110,
"end": 345
} | class ____:
def rand10(self):
"""
:rtype: int
"""
while 1:
i = rand7() - 1
j = rand7()
x = i * 7 + j
if x <= 40:
return x % 10 + 1
| Solution |
python | pypa__warehouse | tests/common/db/organizations.py | {
"start": 3017,
"end": 3174
} | class ____(WarehouseFactory):
class Meta:
model = Organization.Event
source = factory.SubFactory(OrganizationFactory)
| OrganizationEventFactory |
python | celery__celery | celery/concurrency/gevent.py | {
"start": 2257,
"end": 4953
} | class ____(base.BasePool):
"""GEvent Pool."""
Timer = Timer
signal_safe = False
is_green = True
task_join_will_block = False
_pool = None
_pool_map = None
_quick_put = None
def __init__(self, *args, **kwargs):
from gevent import getcurrent, spawn_raw
from gevent.pool import Pool
self.Pool = Pool
self.getcurrent = getcurrent
self.getpid = lambda: id(getcurrent())
self.spawn_n = spawn_raw
self.timeout = kwargs.get('timeout')
super().__init__(*args, **kwargs)
def on_start(self):
self._pool = self.Pool(self.limit)
self._pool_map = {}
self._quick_put = self._pool.spawn
def on_stop(self):
if self._pool is not None:
self._pool.join()
def on_apply(self, target, args=None, kwargs=None, callback=None,
accept_callback=None, timeout=None,
timeout_callback=None, apply_target=apply_target, **_):
timeout = self.timeout if timeout is None else timeout
target = self._make_killable_target(target)
greenlet = self._quick_put(apply_timeout if timeout else apply_target,
target, args, kwargs, callback, accept_callback,
self.getpid, timeout=timeout, timeout_callback=timeout_callback)
self._add_to_pool_map(id(greenlet), greenlet)
greenlet.terminate = types.MethodType(_terminate, greenlet)
return greenlet
def grow(self, n=1):
self._pool._semaphore.counter += n
self._pool.size += n
def shrink(self, n=1):
self._pool._semaphore.counter -= n
self._pool.size -= n
def terminate_job(self, pid, signal=None):
import gevent
if pid in self._pool_map:
greenlet = self._pool_map[pid]
gevent.kill(greenlet)
@property
def num_processes(self):
return len(self._pool)
@staticmethod
def _make_killable_target(target):
def killable_target(*args, **kwargs):
from greenlet import GreenletExit
try:
return target(*args, **kwargs)
except GreenletExit:
return (False, None, None)
return killable_target
def _add_to_pool_map(self, pid, greenlet):
self._pool_map[pid] = greenlet
greenlet.link(
functools.partial(self._cleanup_after_job_finish, pid=pid, pool_map=self._pool_map),
)
@staticmethod
def _cleanup_after_job_finish(greenlet, pool_map, pid):
del pool_map[pid]
def _terminate(self, signal):
# Done in `TaskPool.terminate_job`
pass
| TaskPool |
python | python-openxml__python-docx | tests/oxml/test_xmlchemy.py | {
"start": 27142,
"end": 27253
} | class ____(BaseBuilder):
__tag__ = "w:oooChild"
__nspfxs__ = ("w",)
__attrs__ = ()
| CT_OooChildBuilder |
python | huggingface__transformers | tests/models/seamless_m4t/test_modeling_seamless_m4t.py | {
"start": 32711,
"end": 40220
} | class ____(unittest.TestCase):
repo_id = "facebook/hf-seamless-m4t-medium"
def assertListAlmostEqual(self, list1, list2, tol=1e-3):
self.assertEqual(len(list1), len(list2))
for a, b in zip(list1, list2):
self.assertAlmostEqual(a, b, delta=tol)
@cached_property
def processor(self):
return SeamlessM4TProcessor.from_pretrained(self.repo_id)
@cached_property
def input_text(self):
# corresponds to "C'est un test." with seamlessM4T_medium checkpoint
input_ids = torch.tensor([[256057, 152, 248116, 354, 159, 7356, 248075, 3]]) # fmt: skip
input_ids = input_ids.to(torch_device)
attention_mask = torch.ones_like(input_ids).to(torch_device)
inputs = {
"attention_mask": attention_mask,
"input_ids": input_ids,
}
return inputs
@cached_property
def input_audio(self):
set_seed(0)
seq_len = 20000
sampling_rate = 16000
input_features = torch.rand((2, seq_len))
return self.processor(audio=[input_features.tolist()], sampling_rate=sampling_rate, return_tensors="pt").to(
torch_device
)
def factory_test_task(self, class1, class2, inputs, class1_kwargs, class2_kwargs):
model1 = class1.from_pretrained(self.repo_id).to(torch_device)
model2 = class2.from_pretrained(self.repo_id).to(torch_device)
set_seed(0)
output_1 = model1.generate(**inputs, **class1_kwargs)
set_seed(0)
output_2 = model2.generate(**inputs, **class2_kwargs)
for key in output_1:
if isinstance(output_1[key], torch.Tensor):
if len(output_1[key].shape) == 0:
self.assertEqual(output_1[key].item(), output_2[key].item())
else:
self.assertListAlmostEqual(output_1[key].squeeze().tolist(), output_2[key].squeeze().tolist())
@slow
def test_to_eng_text(self):
model = SeamlessM4TModel.from_pretrained(self.repo_id).to(torch_device)
# test text - tgt lang: eng
expected_text_tokens = [3, 256047, 3291, 248116, 248066, 9, 7356, 248075, 3] # fmt: skip
# fmt: off
expected_unit_tokens = [
2,10051,8980,8212,949,1270,4311,1123,5918,2333,5311,3882,2415,5284,1123,612,8816,6370,5386,7334,4345,5645,
9437,5748,1378,9818,4319,7968,7375,2909,9119,5151,8728,5335,3896,4013,8939,8885,6048,9530,3167,5833,1072,693,
431,9867,364,7909,4608,5938,1889,9984,7947,4944,6171,3767,9861,9169,1187,8365,4571,7635,7784,7635,800,2393,
32,5380,5852,8289,2530,2762,1833,2056,3553,4641,3553,5683,370,2288,1344,1518,7534,703,8359,7699,2
]
# fmt: on
expected_wav_slice = [-3e-05, -0.0004, -0.00037, -0.00013, -6e-05, 0.00012, -0.00016, 0.00025, 7e-05, -3e-05] # fmt: skip
set_seed(0)
output = model.generate(**self.input_text, num_beams=1, tgt_lang="eng", return_intermediate_token_ids=True)
self.assertListEqual(expected_text_tokens, output.sequences.squeeze().tolist())
# FOR NOW, only first units correspondence
self.assertListEqual(expected_unit_tokens[:10], output.unit_sequences.squeeze().tolist()[:10])
self.assertListAlmostEqual(expected_wav_slice, output.waveform.squeeze().tolist()[50:60])
@slow
def test_to_swh_text(self):
model = SeamlessM4TModel.from_pretrained(self.repo_id).to(torch_device)
# test text - tgt lang: swh
expected_text_tokens = [3, 256168, 1665, 188589, 7040, 248075, 3] # fmt: skip
# fmt: off
expected_unit_tokens = [
2,10071,5729,9995,3089,7546,1204,1721,2532,4340,5623,3496,432,7730,9096,7677,3143,8211,6447,8399,4248,3565,
4529,7700,9308,217,6476,3485,9667,3194,8476,4923,5593,1148,4466,7416,4872,463,4872,253,2348,4640,3450,2133,
6318,2806,817,7613,2698,6563,8712,8344,9286,6878,6387,4281,6387,640,6387,3200,640,8355,640,6708,979,1738,2
]
# fmt: on
expected_wav_slice = [1e-05, -7e-05, -4e-05, -4e-05, -6e-05, -9e-05, -0.0001, -2e-05, -7e-05, -2e-05] # fmt: skip
set_seed(0)
output = model.generate(**self.input_text, num_beams=1, tgt_lang="swh", return_intermediate_token_ids=True)
self.assertListEqual(expected_text_tokens, output.sequences.squeeze().tolist())
self.assertListEqual(expected_unit_tokens[:10], output.unit_sequences.squeeze().tolist()[:10])
self.assertListAlmostEqual(expected_wav_slice, output.waveform.squeeze().tolist()[50:60])
@require_speech
@slow
def test_to_rus_speech(self):
model = SeamlessM4TModel.from_pretrained(self.repo_id).to(torch_device)
# test audio - tgt lang: rus
expected_text_tokens = [3, 256147, 1197, 73565, 3413, 537, 233331, 248075, 3] # fmt: skip
# fmt: off
expected_unit_tokens = [
2, 10067, 5729, 4798, 9631, 8378, 4446, 2393, 6901, 5983, 2817, 4629, 8532, 1991, 2931, 8576, 8857, 5936, 4317,
9000, 7740, 7995, 1225, 5980, 6094, 1420, 5373, 8771, 6600, 4487, 7029, 3630, 6740, 4870, 1483, 3003, 5585, 5511,
7465, 3222, 32, 6272, 1950, 3120, 5368, 639, 3713, 5935, 7943, 567, 6129, 6822, 1226, 5063, 9878, 7756, 8825, 1078, 5943,
457, 9282, 9668, 817, 7613, 2698, 6563, 8712, 8704, 9286, 8704, 6387, 4281, 6387, 640, 3200, 6387, 640, 8355, 6708, 979, 1738, 2
]
# fmt: on
expected_wav_slice = [0.00013, 0.00012, 0.00014, 3e-05, 0.0, -6e-05, -0.00018, -0.00016, -0.00021, -0.00018] # fmt: skip
set_seed(0)
output = model.generate(**self.input_audio, num_beams=1, tgt_lang="rus", return_intermediate_token_ids=True)
self.assertListEqual(expected_text_tokens, output.sequences.squeeze().tolist())
self.assertListEqual(expected_unit_tokens[:10], output.unit_sequences.squeeze().tolist()[:10])
self.assertListAlmostEqual(expected_wav_slice, output.waveform.squeeze().tolist()[50:60])
@slow
def test_text_to_text_model(self):
kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True, "generate_speech": False}
kwargs2 = {
"tgt_lang": "eng",
"output_hidden_states": True,
"return_dict_in_generate": True,
"output_scores": True,
}
self.factory_test_task(SeamlessM4TModel, SeamlessM4TForTextToText, self.input_text, kwargs1, kwargs2)
@require_speech
@slow
def test_speech_to_text_model(self):
kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True, "generate_speech": False}
kwargs2 = {
"tgt_lang": "eng",
"output_hidden_states": True,
"return_dict_in_generate": True,
"output_scores": True,
}
self.factory_test_task(SeamlessM4TModel, SeamlessM4TForSpeechToText, self.input_audio, kwargs1, kwargs2)
@require_speech
@slow
def test_speech_to_speech_model(self):
kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True}
self.factory_test_task(SeamlessM4TModel, SeamlessM4TForSpeechToSpeech, self.input_audio, kwargs1, kwargs1)
@slow
def test_text_to_speech_model(self):
kwargs1 = {"tgt_lang": "eng", "return_intermediate_token_ids": True}
self.factory_test_task(SeamlessM4TModel, SeamlessM4TForTextToSpeech, self.input_text, kwargs1, kwargs1)
| SeamlessM4TModelIntegrationTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDict12.py | {
"start": 316,
"end": 971
} | class ____(TD1):
foo: Required[str]
td1: TD1 = {}
td2: TD2 = {"foo": "hi"}
v1: str | None = td1.get("bar")
v2: str = td1.get("bar", "")
v3: str | int = td1.get("bar", 3)
v4: str = td1.setdefault("bar", "1")
# This should generate an error.
td1.setdefault("bar", 3)
# This should generate an error.
td1.setdefault("bar")
# This should generate an error.
td1.setdefault("baz", "")
v6: str = td1.pop("bar")
v7: str | int = td1.pop("bar", 1)
v8: str | int = td1.pop("bar", 3)
v9 = td2.pop("foo")
reveal_type(v9, expected_text="object")
v10 = td2.pop("foo", None)
reveal_type(v10, expected_text="object | None")
td1.__delitem__("bar")
@final
| TD2 |
python | airbytehq__airbyte | airbyte-ci/connectors/connectors_qa/tests/unit_tests/test_checks/test_documentation.py | {
"start": 166,
"end": 7389
} | class ____:
def test_passed_when_no_breaking_changes(self, mocker):
# Arrange
connector = mocker.Mock(technical_name="test-connector", metadata={}, migration_guide_file_path=None)
# Act
result = documentation.CheckMigrationGuide()._run(connector)
# Assert
assert result.status == CheckStatus.PASSED
assert "No breaking changes found. A migration guide is not required" in result.message
def test_fail_when_migration_guide_file_path_does_not_exists(self, mocker, tmp_path):
# Arrange
connector = mocker.Mock(
technical_name="test-connector",
metadata={"releases": {"breakingChanges": {"1.0.0": "Description"}}},
migration_guide_file_path=tmp_path / "not_existing_migration_guide.md",
)
# Act
result = documentation.CheckMigrationGuide()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert "Migration guide file is missing " in result.message
assert connector.technical_name in result.message
assert "Please create a migration guide in" in result.message
def test_fail_when_migration_guide_file_path_is_none(self, mocker):
# Arrange
connector = mocker.Mock(
technical_name="test-connector",
metadata={"releases": {"breakingChanges": {"1.0.0": "Description"}}},
migration_guide_file_path=None,
)
# Act
result = documentation.CheckMigrationGuide()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert "Migration guide file is missing " in result.message
assert connector.technical_name in result.message
assert "Please create a migration guide in" in result.message
def test_fail_when_migration_guide_file_does_not_start_with_correct_header(self, mocker, tmp_path):
# Arrange
connector = mocker.Mock(
name_from_metadata="Test Connector",
technical_name="test-connector",
metadata={"releases": {"breakingChanges": {"1.0.0": "Description"}}},
migration_guide_file_path=tmp_path / "migration_guide.md",
)
connector.migration_guide_file_path.write_text("")
# Act
result = documentation.CheckMigrationGuide()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert "Migration guide file for test-connector does not start with the correct header" in result.message
assert "Expected '# Test Connector Migration Guide', got ''" in result.message
def test_fail_when_migration_guide_file_has_missing_version_headings(self, mocker, tmp_path):
# Arrange
connector = mocker.Mock(
name_from_metadata="Test Connector",
technical_name="test-connector",
metadata={"releases": {"breakingChanges": {"1.0.0": "Description", "2.0.0": "Description"}}},
migration_guide_file_path=tmp_path / "migration_guide.md",
)
connector.migration_guide_file_path.write_text("# Test Connector Migration Guide\n## Upgrading to 1.0.0\n")
# Act
result = documentation.CheckMigrationGuide()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert f"Migration guide file for {connector.name_from_metadata} has incorrect version headings" in result.message
assert "Expected headings: ['## Upgrading to 2.0.0', '## Upgrading to 1.0.0']" in result.message
def test_fail_when_migration_guide_file_has_invalid_version_headings(self, mocker, tmp_path):
# Arrange
connector = mocker.Mock(
name_from_metadata="Test Connector",
technical_name="test-connector",
metadata={"releases": {"breakingChanges": {"1.0.0": "Description", "2.0.0": "Description"}}},
migration_guide_file_path=tmp_path / "migration_guide.md",
)
connector.migration_guide_file_path.write_text("# Test Connector Migration Guide\n## Upgrade to 1.0.0\n")
# Act
result = documentation.CheckMigrationGuide()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert f"Migration guide file for {connector.name_from_metadata} has incorrect version headings" in result.message
assert "Expected headings: ['## Upgrading to 2.0.0', '## Upgrading to 1.0.0']" in result.message
def test_fail_when_migration_guide_file_has_ascending_version_headings(self, mocker, tmp_path):
# Arrange
connector = mocker.Mock(
name_from_metadata="Test Connector",
technical_name="test-connector",
metadata={"releases": {"breakingChanges": {"1.0.0": "Description", "2.0.0": "Description"}}},
migration_guide_file_path=tmp_path / "migration_guide.md",
)
connector.migration_guide_file_path.write_text("# Test Connector Migration Guide\n## Upgrading to 1.0.0\n## Upgrading to 2.0.0\n")
# Act
result = documentation.CheckMigrationGuide()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert f"Migration guide file for {connector.name_from_metadata} has incorrect version headings" in result.message
assert "Expected headings: ['## Upgrading to 2.0.0', '## Upgrading to 1.0.0']" in result.message
def test_fail_when_migration_guide_file_has_incorrect_version_headings(self, mocker, tmp_path):
# Arrange
connector = mocker.Mock(
name_from_metadata="Test Connector",
technical_name="test-connector",
metadata={"releases": {"breakingChanges": {"1.0.0": "Description", "2.0.0": "Description"}}},
migration_guide_file_path=tmp_path / "migration_guide.md",
)
connector.migration_guide_file_path.write_text("# Test Connector Migration Guide\n## Upgrading to 1.0.0\n## Upgrading to 3.0.0\n")
# Act
result = documentation.CheckMigrationGuide()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert f"Migration guide file for {connector.name_from_metadata} has incorrect version headings" in result.message
assert "Expected headings: ['## Upgrading to 2.0.0', '## Upgrading to 1.0.0']" in result.message
def test_pass_when_migration_guide_file_has_correct_version_headings(self, mocker, tmp_path):
# Arrange
connector = mocker.Mock(
name_from_metadata="Test Connector",
technical_name="test-connector",
metadata={"releases": {"breakingChanges": {"1.0.0": "Description", "2.0.0": "Description"}}},
migration_guide_file_path=tmp_path / "migration_guide.md",
)
connector.migration_guide_file_path.write_text("# Test Connector Migration Guide\n## Upgrading to 2.0.0\n## Upgrading to 1.0.0\n")
# Act
result = documentation.CheckMigrationGuide()._run(connector)
# Assert
assert result.status == CheckStatus.PASSED
assert "The migration guide is correctly templated" in result.message
| TestCheckMigrationGuide |
python | joke2k__faker | faker/providers/bank/nl_BE/__init__.py | {
"start": 42,
"end": 2442
} | class ____(BankProvider):
"""Implement bank provider for `nl_BE` locale.
Information about the Belgian banks can be found on the website
of the National Bank of Belgium:
https://www.nbb.be/nl/betalingen-en-effecten/betalingsstandaarden/bankidentificatiecodes
"""
bban_format = "############"
country_code = "BE"
banks = (
"Argenta Spaarbank",
"AXA Bank",
"Belfius Bank",
"BNP Paribas Fortis",
"Bpost Bank",
"Crelan",
"Deutsche Bank AG",
"ING België",
"KBC Bank",
)
swift_bank_codes = (
"ARSP",
"AXAB",
"BBRU",
"BPOT",
"DEUT",
"GEBA",
"GKCC",
"KRED",
"NICA",
)
swift_location_codes = (
"BE",
"B2",
"99",
"21",
"91",
"23",
"3X",
"75",
"2X",
"22",
"88",
"B1",
"BX",
"BB",
)
swift_branch_codes = [
"203",
"BTB",
"CIC",
"HCC",
"IDJ",
"IPC",
"MDC",
"RET",
"VOD",
"XXX",
]
def bban(self) -> str:
"""Generate a valid BBAN."""
account_number = self._generate_account_number()
check_digits = self._calculate_mod97(account_number)
return f"{account_number}{check_digits}"
def iban(self) -> str:
"""Generate a valid IBAN."""
bban = self.bban()
iban_check_digits = self._calculate_iban_check_digits(bban)
return f"{self.country_code}{iban_check_digits}{bban}"
def _generate_account_number(self) -> str:
"""Generate a random 10-digit account number."""
return self.numerify("##########")
def _calculate_mod97(self, account_number: str) -> str:
"""Calculate the mod 97 check digits for a given account number."""
remainder = int(account_number) % 97
return str(remainder).zfill(2) if remainder != 0 else "97"
def _calculate_iban_check_digits(self, bban: str) -> str:
"""Calculate the IBAN check digits using mod 97 algorithm."""
raw_iban = f"{bban}{self.country_code}00"
numeric_iban = "".join(str(ord(char) - 55) if char.isalpha() else char for char in raw_iban)
check_digits = 98 - (int(numeric_iban) % 97)
return str(check_digits).zfill(2)
| Provider |
python | pandas-dev__pandas | pandas/tests/tseries/offsets/test_business_halfyear.py | {
"start": 989,
"end": 7413
} | class ____:
def test_repr(self):
expected = "<BusinessHalfYearBegin: startingMonth=1>"
assert repr(BHalfYearBegin()) == expected
expected = "<BusinessHalfYearBegin: startingMonth=3>"
assert repr(BHalfYearBegin(startingMonth=3)) == expected
expected = "<BusinessHalfYearBegin: startingMonth=1>"
assert repr(BHalfYearBegin(startingMonth=1)) == expected
def test_offset_corner_case(self):
# corner
offset = BHalfYearBegin(n=-1, startingMonth=1)
assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 1)
offset_cases = []
offset_cases.append(
(
BHalfYearBegin(startingMonth=1),
{
datetime(2007, 12, 1): datetime(2008, 1, 1),
datetime(2008, 1, 1): datetime(2008, 7, 1),
datetime(2008, 2, 15): datetime(2008, 7, 1),
datetime(2008, 2, 29): datetime(2008, 7, 1),
datetime(2008, 3, 15): datetime(2008, 7, 1),
datetime(2008, 3, 31): datetime(2008, 7, 1),
datetime(2008, 4, 15): datetime(2008, 7, 1),
datetime(2008, 4, 1): datetime(2008, 7, 1),
datetime(2008, 7, 1): datetime(2009, 1, 1),
datetime(2008, 7, 15): datetime(2009, 1, 1),
},
)
)
offset_cases.append(
(
BHalfYearBegin(startingMonth=2),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 2, 29): datetime(2008, 8, 1),
datetime(2008, 3, 15): datetime(2008, 8, 1),
datetime(2008, 3, 31): datetime(2008, 8, 1),
datetime(2008, 4, 15): datetime(2008, 8, 1),
datetime(2008, 4, 30): datetime(2008, 8, 1),
},
)
)
offset_cases.append(
(
BHalfYearBegin(startingMonth=1, n=0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 12, 1): datetime(2009, 1, 1),
datetime(2008, 2, 15): datetime(2008, 7, 1),
datetime(2008, 2, 29): datetime(2008, 7, 1),
datetime(2008, 3, 15): datetime(2008, 7, 1),
datetime(2008, 3, 31): datetime(2008, 7, 1),
datetime(2008, 4, 15): datetime(2008, 7, 1),
datetime(2008, 4, 30): datetime(2008, 7, 1),
datetime(2008, 7, 1): datetime(2008, 7, 1),
datetime(2008, 7, 15): datetime(2009, 1, 1),
},
)
)
offset_cases.append(
(
BHalfYearBegin(startingMonth=1, n=-1),
{
datetime(2008, 1, 1): datetime(2007, 7, 2),
datetime(2008, 1, 31): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 1, 1),
datetime(2008, 2, 29): datetime(2008, 1, 1),
datetime(2008, 3, 15): datetime(2008, 1, 1),
datetime(2008, 3, 31): datetime(2008, 1, 1),
datetime(2008, 4, 15): datetime(2008, 1, 1),
datetime(2008, 4, 30): datetime(2008, 1, 1),
datetime(2008, 7, 1): datetime(2008, 1, 1),
datetime(2008, 7, 15): datetime(2008, 7, 1),
},
)
)
offset_cases.append(
(
BHalfYearBegin(startingMonth=1, n=2),
{
datetime(2008, 1, 1): datetime(2009, 1, 1),
datetime(2008, 2, 15): datetime(2009, 1, 1),
datetime(2008, 2, 29): datetime(2009, 1, 1),
datetime(2008, 3, 15): datetime(2009, 1, 1),
datetime(2008, 3, 31): datetime(2009, 1, 1),
datetime(2008, 4, 15): datetime(2009, 1, 1),
datetime(2008, 4, 1): datetime(2009, 1, 1),
datetime(2008, 7, 15): datetime(2009, 7, 1),
datetime(2008, 7, 1): datetime(2009, 7, 1),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(BHalfYearBegin(1, startingMonth=1), datetime(2008, 1, 1), True),
(BHalfYearBegin(1, startingMonth=1), datetime(2007, 12, 1), False),
(BHalfYearBegin(1, startingMonth=1), datetime(2008, 2, 1), False),
(BHalfYearBegin(1, startingMonth=1), datetime(2007, 3, 1), False),
(BHalfYearBegin(1, startingMonth=1), datetime(2008, 4, 1), False),
(BHalfYearBegin(1, startingMonth=1), datetime(2008, 5, 1), False),
(BHalfYearBegin(1, startingMonth=1), datetime(2007, 6, 1), False),
(BHalfYearBegin(1, startingMonth=3), datetime(2008, 1, 1), False),
(BHalfYearBegin(1, startingMonth=3), datetime(2007, 12, 1), False),
(BHalfYearBegin(1, startingMonth=3), datetime(2008, 2, 1), False),
(BHalfYearBegin(1, startingMonth=3), datetime(2007, 3, 1), True),
(BHalfYearBegin(1, startingMonth=3), datetime(2008, 4, 1), False),
(BHalfYearBegin(1, startingMonth=3), datetime(2008, 5, 1), False),
(BHalfYearBegin(1, startingMonth=3), datetime(2008, 5, 2), False),
(BHalfYearBegin(1, startingMonth=3), datetime(2007, 6, 1), False),
(BHalfYearBegin(1, startingMonth=3), datetime(2007, 6, 2), False),
(BHalfYearBegin(1, startingMonth=6), datetime(2008, 1, 1), False),
(BHalfYearBegin(1, startingMonth=6), datetime(2007, 12, 3), True),
(BHalfYearBegin(1, startingMonth=6), datetime(2008, 2, 1), False),
(BHalfYearBegin(1, startingMonth=6), datetime(2007, 3, 1), False),
(BHalfYearBegin(1, startingMonth=6), datetime(2007, 3, 2), False),
(BHalfYearBegin(1, startingMonth=6), datetime(2008, 4, 1), False),
(BHalfYearBegin(1, startingMonth=6), datetime(2008, 5, 1), False),
(BHalfYearBegin(1, startingMonth=6), datetime(2008, 5, 2), False),
(BHalfYearBegin(1, startingMonth=6), datetime(2007, 6, 1), True),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
| TestBHalfYearBegin |
python | jazzband__django-simple-history | simple_history/tests/admin.py | {
"start": 766,
"end": 930
} | class ____(SimpleHistoryAdmin):
def test_method(self, obj):
return "test_method_value"
history_list_display = ["title", "test_method"]
| FileModelAdmin |
python | PrefectHQ__prefect | tests/_internal/concurrency/test_services.py | {
"start": 1088,
"end": 5053
} | class ____(BatchedQueueService[int]):
_max_batch_size = 2
mock = MagicMock()
def __init__(self, index: Optional[int] = None) -> None:
if index is not None:
super().__init__(index)
else:
super().__init__()
async def _handle_batch(self, items: List[int]):
# Checkpoint to catch errors where async cancellation has occurred
await asyncio.sleep(0)
self.mock(self, items)
print(f"Handled batch for {self}")
@pytest.fixture(autouse=True)
def reset_mock_services():
yield
# Reset mocks
MockService.mock.reset_mock(side_effect=True)
MockBatchedService.mock.reset_mock(side_effect=True)
# Drain all items from the queue
MockService.drain_all()
MockBatchedService.drain_all()
# Shutdown the global loop
wait_for_global_loop_exit()
def test_instance_returns_instance():
instance = MockService.instance()
assert isinstance(instance, MockService)
def test_instance_returns_same_instance():
instance = MockService.instance()
assert MockService.instance() is instance
def test_instance_returns_new_instance_after_stopping():
instance = MockService.instance()
instance._stop()
new_instance = MockService.instance()
assert new_instance is not instance
assert isinstance(new_instance, MockService)
def test_instance_returns_new_instance_with_unique_key():
instance = MockService.instance(1)
new_instance = MockService.instance(2)
assert new_instance is not instance
assert isinstance(new_instance, MockService)
def test_different_subclasses_have_unique_instances():
instance = MockService.instance()
assert isinstance(instance, MockService)
new_instance = MockBatchedService.instance()
assert new_instance is not instance
assert isinstance(new_instance, MockBatchedService)
def test_instance_returns_same_instance_after_error():
event = threading.Event()
def on_handle(*_):
event.set()
raise ValueError("Oh no")
instance = MockService.instance()
instance.mock.side_effect = on_handle
instance.send(1)
# Wait for the service to actually handle the item
event.wait()
new_instance = MockService.instance()
assert new_instance is instance
assert isinstance(new_instance, MockService)
instance.mock.side_effect = None
# The instance can be used still
new_instance.send(2)
new_instance.drain()
new_instance.mock.assert_has_calls([call(instance, 1), call(instance, 2)])
def test_instance_returns_new_instance_after_base_exception():
event = threading.Event()
def on_handle(*_):
event.set()
raise BaseException("Oh no")
instance = MockService.instance()
instance.mock.side_effect = on_handle
instance.send(1)
# Wait for the service to actually handle the item
event.wait()
instance.mock.reset_mock(side_effect=True)
new_instance = MockService.instance()
assert new_instance is not instance
assert isinstance(new_instance, MockService)
# The new instance can be used
new_instance.send(2)
new_instance.drain()
new_instance.mock.assert_called_once_with(new_instance, 2)
def test_send_one():
instance = MockService.instance()
instance.send(1)
MockService.drain_all()
MockService.mock.assert_called_once_with(instance, 1)
def test_send_many():
instance = MockService.instance()
for i in range(10):
instance.send(i)
MockService.drain_all()
MockService.mock.assert_has_calls([call(instance, i) for i in range(10)])
def test_send_many_instances():
instances = []
for i in range(10):
instance = MockService.instance(i)
instance.send(i)
instances.append(instance)
MockService.drain_all()
MockService.mock.assert_has_calls(
[call(instance, i) for instance, i in zip(instances, range(10))], any_order=True
)
| MockBatchedService |
python | walkccc__LeetCode | solutions/2544. Alternating Digit Sum/2544.py | {
"start": 0,
"end": 187
} | class ____:
def alternateDigitSum(self, n: int) -> int:
ans = 0
sign = 1
while n > 0:
sign *= -1
ans += n % 10 * sign
n //= 10
return sign * ans
| Solution |
python | pyca__cryptography | src/cryptography/hazmat/_oid.py | {
"start": 2414,
"end": 4079
} | class ____:
COMMON_NAME = ObjectIdentifier("2.5.4.3")
COUNTRY_NAME = ObjectIdentifier("2.5.4.6")
LOCALITY_NAME = ObjectIdentifier("2.5.4.7")
STATE_OR_PROVINCE_NAME = ObjectIdentifier("2.5.4.8")
STREET_ADDRESS = ObjectIdentifier("2.5.4.9")
ORGANIZATION_IDENTIFIER = ObjectIdentifier("2.5.4.97")
ORGANIZATION_NAME = ObjectIdentifier("2.5.4.10")
ORGANIZATIONAL_UNIT_NAME = ObjectIdentifier("2.5.4.11")
SERIAL_NUMBER = ObjectIdentifier("2.5.4.5")
SURNAME = ObjectIdentifier("2.5.4.4")
GIVEN_NAME = ObjectIdentifier("2.5.4.42")
TITLE = ObjectIdentifier("2.5.4.12")
INITIALS = ObjectIdentifier("2.5.4.43")
GENERATION_QUALIFIER = ObjectIdentifier("2.5.4.44")
X500_UNIQUE_IDENTIFIER = ObjectIdentifier("2.5.4.45")
DN_QUALIFIER = ObjectIdentifier("2.5.4.46")
PSEUDONYM = ObjectIdentifier("2.5.4.65")
USER_ID = ObjectIdentifier("0.9.2342.19200300.100.1.1")
DOMAIN_COMPONENT = ObjectIdentifier("0.9.2342.19200300.100.1.25")
EMAIL_ADDRESS = ObjectIdentifier("1.2.840.113549.1.9.1")
JURISDICTION_COUNTRY_NAME = ObjectIdentifier("1.3.6.1.4.1.311.60.2.1.3")
JURISDICTION_LOCALITY_NAME = ObjectIdentifier("1.3.6.1.4.1.311.60.2.1.1")
JURISDICTION_STATE_OR_PROVINCE_NAME = ObjectIdentifier(
"1.3.6.1.4.1.311.60.2.1.2"
)
BUSINESS_CATEGORY = ObjectIdentifier("2.5.4.15")
POSTAL_ADDRESS = ObjectIdentifier("2.5.4.16")
POSTAL_CODE = ObjectIdentifier("2.5.4.17")
INN = ObjectIdentifier("1.2.643.3.131.1.1")
OGRN = ObjectIdentifier("1.2.643.100.1")
SNILS = ObjectIdentifier("1.2.643.100.3")
UNSTRUCTURED_NAME = ObjectIdentifier("1.2.840.113549.1.9.2")
| NameOID |
python | kamyu104__LeetCode-Solutions | Python/sum-of-imbalance-numbers-of-all-subarrays.py | {
"start": 57,
"end": 850
} | class ____(object):
def sumImbalanceNumbers(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
right = [len(nums)]*len(nums)
lookup = [len(nums)]*((len(nums)+1)+1)
for i in reversed(xrange(len(nums))):
right[i] = min(lookup[nums[i]], lookup[nums[i]+1]) # to avoid duplicated count
lookup[nums[i]] = i
result = left = 0
lookup = [-1]*((len(nums)+1)+1)
for i in xrange(len(nums)):
left = lookup[nums[i]+1]
lookup[nums[i]] = i
result += (i-left)*(right[i]-i)
return result - (len(nums)+1)*len(nums)//2 # since we overcount 1 in each subarray, we have to subtract all of them
# Time: O(n^2)
# Space: O(n)
# hash table, two pointers
| Solution |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/logger_definition.py | {
"start": 884,
"end": 7332
} | class ____(AnonymousConfigurableDefinition):
"""Core class for defining loggers.
Loggers are job-scoped logging handlers, which will be automatically invoked whenever
dagster messages are logged from within a job.
Args:
logger_fn (Callable[[InitLoggerContext], logging.Logger]): User-provided function to
instantiate the logger. This logger will be automatically invoked whenever the methods
on ``context.log`` are called from within job compute logic.
config_schema (Optional[ConfigSchema]): The schema for the config. Configuration data available in
`init_context.logger_config`. If not set, Dagster will accept any config provided.
description (Optional[str]): A human-readable description of this logger.
"""
def __init__(
self,
logger_fn: "InitLoggerFunction",
config_schema: Any = None,
description: Optional[str] = None,
):
self._logger_fn = check.callable_param(logger_fn, "logger_fn")
self._config_schema = convert_user_facing_definition_config_schema(config_schema)
self._description = check.opt_str_param(description, "description")
def __call__(self, *args, **kwargs):
from dagster._core.definitions.logger_invocation import logger_invocation_result
from dagster._core.execution.context.logger import UnboundInitLoggerContext
if len(args) == 0 and len(kwargs) == 0:
raise DagsterInvalidInvocationError(
"Logger initialization function has context argument, but no context argument was "
"provided when invoking."
)
if len(args) + len(kwargs) > 1:
raise DagsterInvalidInvocationError(
"Initialization of logger received multiple arguments. Only a first "
"positional context parameter should be provided when invoking."
)
context_param_name = get_function_params(self.logger_fn)[0].name
if args:
context = check.opt_inst_param(
args[0],
context_param_name,
UnboundInitLoggerContext,
default=UnboundInitLoggerContext(logger_config=None, job_def=None),
)
return logger_invocation_result(self, context)
else:
if context_param_name not in kwargs:
raise DagsterInvalidInvocationError(
f"Logger initialization expected argument '{context_param_name}'."
)
context = check.opt_inst_param(
kwargs[context_param_name],
context_param_name,
UnboundInitLoggerContext,
default=UnboundInitLoggerContext(logger_config=None, job_def=None),
)
return logger_invocation_result(self, context)
@public
@property
def logger_fn(self) -> "InitLoggerFunction":
"""Callable[[InitLoggerContext], logging.Logger]: The function that will be invoked to
instantiate the logger.
"""
return self._logger_fn
@public
@property
def config_schema(self) -> Any:
"""Any: The schema for the logger's config. Configuration data available in `init_context.logger_config`."""
return self._config_schema
@public
@property
def description(self) -> Optional[str]:
"""Optional[str]: A human-readable description of the logger."""
return self._description
def copy_for_configured(
self,
description: Optional[str],
config_schema: Any,
) -> "LoggerDefinition":
return LoggerDefinition(
config_schema=config_schema,
description=description or self.description,
logger_fn=self.logger_fn,
)
@overload
def logger(
config_schema: CoercableToConfigSchema, description: Optional[str] = ...
) -> Callable[["InitLoggerFunction"], "LoggerDefinition"]: ...
@overload
def logger(
config_schema: "InitLoggerFunction", description: Optional[str] = ...
) -> "LoggerDefinition": ...
@public
def logger(
config_schema: Union[CoercableToConfigSchema, "InitLoggerFunction"] = None,
description: Optional[str] = None,
) -> Union["LoggerDefinition", Callable[["InitLoggerFunction"], "LoggerDefinition"]]:
"""Define a logger.
The decorated function should accept an :py:class:`InitLoggerContext` and return an instance of
:py:class:`python:logging.Logger`. This function will become the ``logger_fn`` of an underlying
:py:class:`LoggerDefinition`.
Args:
config_schema (Optional[ConfigSchema]): The schema for the config. Configuration data available in
`init_context.logger_config`. If not set, Dagster will accept any config provided.
description (Optional[str]): A human-readable description of the logger.
"""
# This case is for when decorator is used bare, without arguments.
# E.g. @logger versus @logger()
if callable(config_schema) and not is_callable_valid_config_arg(config_schema):
return LoggerDefinition(logger_fn=cast("InitLoggerFunction", config_schema))
def _wrap(logger_fn: "InitLoggerFunction") -> "LoggerDefinition":
return LoggerDefinition(
logger_fn=logger_fn,
config_schema=config_schema,
description=description,
)
return _wrap
@public
def build_init_logger_context(
logger_config: Any = None,
job_def: Optional["JobDefinition"] = None,
) -> "UnboundInitLoggerContext":
"""Builds logger initialization context from provided parameters.
This function can be used to provide the context argument to the invocation of a logger
definition.
Note that you may only specify one of pipeline_def and job_def.
Args:
logger_config (Any): The config to provide during initialization of logger.
job_def (Optional[JobDefinition]): The job definition that the logger will be used with.
Examples:
.. code-block:: python
context = build_init_logger_context()
logger_to_init(context)
"""
from dagster._core.definitions import JobDefinition
from dagster._core.execution.context.logger import UnboundInitLoggerContext
check.opt_inst_param(job_def, "job_def", JobDefinition)
return UnboundInitLoggerContext(logger_config=logger_config, job_def=job_def)
| LoggerDefinition |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-python-http-tutorial/source_python_http_tutorial/source.py | {
"start": 4012,
"end": 5217
} | class ____(AbstractSource):
def check_connection(self, logger, config) -> Tuple[bool, any]:
accepted_currencies = {
"USD",
"JPY",
"BGN",
"CZK",
"DKK",
} # there are more currencies but let's assume these are the only allowed ones
input_currency = config["base"]
if input_currency not in accepted_currencies:
return False, f"Input currency {input_currency} is invalid. Please input one of the following currencies: {accepted_currencies}"
else:
return True, None
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
# No authentication is required for this API. It's only included for completeness
# of the example, but if you don't need authentication, you don't need to pass an authenticator at all.
# Other authenticators are available for API token-based auth and Oauth2.
auth = None
# Parse the date from a string into a datetime object
start_date = datetime.strptime(config["start_date"], "%Y-%m-%d")
return [ExchangeRates(authenticator=auth, config=config, start_date=start_date)]
| SourcePythonHttpTutorial |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/exc.py | {
"start": 24373,
"end": 24462
} | class ____(DatabaseError):
"""Wraps a DB-API DataError."""
code = "9h9h"
| DataError |
python | has2k1__plotnine | plotnine/geoms/geom_freqpoly.py | {
"start": 144,
"end": 339
} | class ____(geom_path):
"""
Frequency polygon
{usage}
See [](`~plotnine.geoms.geom_path`) for documentation
of the parameters.
"""
DEFAULT_PARAMS = _params
| geom_freqpoly |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 84207,
"end": 84372
} | class ____(BaseModel):
"""
Information of a peer in the cluster
"""
uri: str = Field(..., description="Information of a peer in the cluster")
| PeerInfo |
python | jazzband__django-simple-history | simple_history/registry_tests/migration_test_app/migrations/0006_alter_historicalmodelwithcustomattronetoonefield_options_and_more.py | {
"start": 108,
"end": 912
} | class ____(migrations.Migration):
dependencies = [
(
"migration_test_app",
"0005_historicalmodelwithcustomattronetoonefield_modelwithcustomattronetoonefield",
),
]
operations = [
migrations.AlterModelOptions(
name="historicalmodelwithcustomattronetoonefield",
options={
"get_latest_by": ("history_date", "history_id"),
"ordering": ("-history_date", "-history_id"),
"verbose_name": "historical model with custom attr one to one field",
},
),
migrations.AlterField(
model_name="historicalmodelwithcustomattronetoonefield",
name="history_date",
field=models.DateTimeField(db_index=True),
),
]
| Migration |
python | doocs__leetcode | solution/3500-3599/3550.Smallest Index With Digit Sum Equal to Index/Solution.py | {
"start": 0,
"end": 264
} | class ____:
def smallestIndex(self, nums: List[int]) -> int:
for i, x in enumerate(nums):
s = 0
while x:
s += x % 10
x //= 10
if s == i:
return i
return -1
| Solution |
python | keras-team__keras | keras/src/ops/core_test.py | {
"start": 12237,
"end": 46018
} | class ____(testing.TestCase):
def test_associative_scan(self):
# Test prefix sum
arr = np.arange(5)
result = core.associative_scan(f=operator.add, elems=arr)
self.assertAllEqual(result, [0, 1, 3, 6, 10])
# Test reverse
result = core.associative_scan(f=operator.add, elems=arr, reverse=True)
self.assertAllEqual(result, [10, 10, 9, 7, 4])
# Test multiple dimensions, across different axes
batched_arr = np.stack([arr, arr + 1, arr + 2])
result = core.associative_scan(
f=operator.add, elems=batched_arr, axis=1
)
self.assertAllEqual(result[2], [2, 5, 9, 14, 20])
result = core.associative_scan(
f=operator.add, elems=batched_arr, axis=0
)
self.assertAllEqual(result[:, 0], [0, 1, 3])
# Test structured input
elems = {
"a": np.array([[0, 1, 2], [3, 4, 5]]),
"b": np.array([[6, 7, 8], [9, 10, 11]]),
}
def _dict_add(x, y):
return {"a": x["a"] + y["b"], "b": x["b"] + y["b"]}
ax0 = core.associative_scan(f=_dict_add, elems=elems, axis=0)
self.assertAllEqual(
ax0["b"],
[[6, 7, 8], [15, 17, 19]],
)
# Test parallel scan op used in mamba
b, l, d, n = 1, 2, 3, 4
DB = np.random.rand(b, l, d, n)
DA = np.random.rand(b, l, d, n)
H_seq = np.zeros((b, d, n))
for i in range(l):
H_seq = DA[:, i] * H_seq + DB[:, i]
def scan_op(ci, cj):
a = cj[0] * ci[0]
b = cj[0] * ci[1] + cj[1]
return (a, b)
inputs = (DA.transpose(1, 0, 2, 3), DB.transpose(1, 0, 2, 3))
H_par = core.associative_scan(f=scan_op, elems=inputs)[-1][-1]
self.assertAllClose(H_seq, H_par)
# Test Operation call.
xs = np.arange(5, dtype="float32")
self.assertAllClose(
core.AssociativeScan()(operator.add, xs), ops.cumsum(xs)
)
def test_cast(self):
x = ops.ones((2,), dtype="float32")
y = ops.cast(x, "float16")
self.assertIn("float16", str(y.dtype))
x = ops.KerasTensor((2,), dtype="float32")
y = ops.cast(x, "float16")
self.assertEqual("float16", y.dtype)
self.assertEqual(x.shape, y.shape)
self.assertTrue(hasattr(y, "_keras_history"))
# Test Operation call.
x = ops.ones((2,), dtype="float32")
self.assertDType(core.Cast("float16")(x), "float16")
@parameterized.named_parameters(
("float8_e4m3fn", "float8_e4m3fn"), ("float8_e5m2", "float8_e5m2")
)
def test_cast_float8(self, float8_dtype):
# Cast to float8 and cast back
x = ops.ones((2,), dtype="float32")
y = ops.cast(x, float8_dtype)
self.assertIn(float8_dtype, str(y.dtype))
x = ops.cast(y, "float32")
self.assertIn("float32", str(x.dtype))
x = ops.KerasTensor((2,), dtype="float32")
y = ops.cast(x, float8_dtype)
self.assertEqual(float8_dtype, y.dtype)
self.assertEqual(x.shape, y.shape)
self.assertTrue(hasattr(y, "_keras_history"))
x = ops.cast(y, "float32")
self.assertEqual("float32", x.dtype)
self.assertEqual(x.shape, y.shape)
self.assertTrue(hasattr(x, "_keras_history"))
def test_cond(self):
t = ops.cond(True, lambda: 0, lambda: 1)
self.assertEqual(t, 0)
f = ops.cond(False, lambda: 0, lambda: 1)
self.assertEqual(f, 1)
f = ops.cond(False, lambda: None, lambda: None)
self.assertEqual(f, None)
out = ops.cond(
ops.convert_to_tensor(True),
lambda: ops.ones((1, 3)),
lambda: ops.zeros((1, 3)),
)
self.assertAllClose(out, ops.ones((1, 3)))
out = ops.cond(
ops.convert_to_tensor(False),
lambda: ops.ones((3,)),
lambda: ops.zeros((3,)),
)
self.assertAllClose(out, ops.zeros((3,)))
with self.assertRaises(ValueError):
ops.cond(
KerasTensor((), dtype="bool"),
lambda: ops.ones((3,)),
lambda: ops.zeros((4,)),
)
def test_convert_to_tensor(self):
x = np.ones((2,))
x = ops.convert_to_tensor(x)
x = ops.convert_to_numpy(x)
self.assertAllEqual(x, (1, 1))
self.assertIsInstance(x, np.ndarray)
# Empty lists should give an empty array.
x = ops.convert_to_tensor([])
np_x = ops.convert_to_numpy(x)
self.assertTrue(ops.is_tensor(x))
self.assertAllEqual(x, [])
self.assertIsInstance(np_x, np.ndarray)
# Partially converted.
x = ops.convert_to_tensor((1, ops.array(2), 3))
self.assertAllEqual(x, (1, 2, 3))
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason=f"{backend.backend()} backend doesn't support sparse tensors.",
)
def test_convert_to_tensor_sparse(self):
if backend.backend() == "tensorflow":
import tensorflow as tf
x = tf.SparseTensor([[0, 0], [1, 2]], [1.0, 2.0], (2, 3))
elif backend.backend() == "jax":
import jax.experimental.sparse as jax_sparse
x = jax_sparse.BCOO(([1.0, 2.0], [[0, 0], [1, 2]]), shape=(2, 3))
else:
self.fail(f"Sparse is unsupported with backend {backend.backend()}")
x_default = ops.convert_to_tensor(x)
self.assertSparse(x_default)
self.assertAllClose(x, x_default)
x_sparse = ops.convert_to_tensor(x, sparse=True)
self.assertSparse(x_sparse)
self.assertAllClose(x, x_sparse)
x_dense = ops.convert_to_tensor(x, sparse=False)
self.assertSparse(x_dense, False)
self.assertAllClose(x, x_dense)
x_numpy = ops.convert_to_numpy(x)
self.assertIsInstance(x_numpy, np.ndarray)
self.assertAllClose(x_numpy, x_dense)
@pytest.mark.skipif(
not backend.SUPPORTS_RAGGED_TENSORS,
reason=f"{backend.backend()} backend doesn't support ragged tensors.",
)
def test_convert_to_tensor_ragged(self):
import tensorflow as tf
x = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
x_default = ops.convert_to_tensor(x)
self.assertIsInstance(x_default, tf.RaggedTensor)
self.assertAllClose(x, x_default)
x_ragged = ops.convert_to_tensor(x, ragged=True)
self.assertIsInstance(x_ragged, tf.RaggedTensor)
self.assertAllClose(x, x_ragged)
x_dense = ops.convert_to_tensor(x, ragged=False)
self.assertNotIsInstance(x_dense, tf.RaggedTensor)
self.assertAllClose(x, x_dense)
x_numpy = ops.convert_to_numpy(x)
self.assertIsInstance(x_numpy, np.ndarray)
self.assertAllClose(x_numpy, x_dense)
@pytest.mark.skipif(
backend.backend() not in ("tensorflow", "jax", "torch"),
reason=(
f"{backend.backend()} backend doesn't support `custom_gradient`."
),
)
def test_custom_gradient(self):
# function to test custom_gradient on
@ops.custom_gradient
def log1pexp(x):
e = ops.exp(x)
def grad(*args, upstream=None):
if upstream is None:
(upstream,) = args
return ops.multiply(upstream, 1.0 - 1.0 / ops.add(1, e))
return ops.log(1 + e), grad
def log1pexp_nan(x):
return ops.log(1 + ops.exp(x))
x = ops.convert_to_tensor(100.0)
if backend.backend() == "tensorflow":
import tensorflow as tf
with tf.GradientTape() as tape1:
tape1.watch(x)
y = log1pexp(x)
with tf.GradientTape() as tape2:
tape2.watch(x)
z = log1pexp_nan(x)
dy_dx = tape1.gradient(y, x)
dz_dx = tape2.gradient(z, x)
self.assertEqual(ops.convert_to_numpy(dy_dx), 1.0)
elif backend.backend() == "jax":
import jax
dy_dx = jax.grad(log1pexp)(x)
dz_dx = jax.grad(log1pexp_nan)(x)
self.assertEqual(ops.convert_to_numpy(dy_dx), 1.0)
self.assertTrue(ops.isnan(dz_dx))
elif backend.backend() == "torch":
import torch
x = torch.tensor(100.0, requires_grad=True)
z = log1pexp(x)
z.sum().backward()
self.assertEqual(ops.convert_to_numpy(x.grad), 1.0)
def test_dynamic_slice(self):
def cond(index, inputs, sum):
return index < 10
def body(index, inputs, sum):
sum = sum + core.slice(inputs, [index], [1])
index = index + 1
return index, inputs, sum
index, inputs, sum = 0, np.arange(10), np.array([0])
index, inputs, sum = core.while_loop(cond, body, (index, inputs, sum))
self.assertEqual(sum.shape, (1,))
self.assertAllClose(sum, [45])
def test_fori_loop(self):
def body_fun(i, x):
return x + i
initial_value = np.array(0)
result = core.fori_loop(0, 10, body_fun, initial_value)
self.assertAllClose(result, 45)
# Test Operation call.
self.assertAllClose(core.ForiLoop(0, 10, body_fun)(initial_value), 45)
def test_getitem(self):
np_tensor = np.arange(24).reshape(2, 3, 4)
tensor = ops.convert_to_tensor(np_tensor)
t = tensor[1]
n = np_tensor[1]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[1, 2, 3]
n = np_tensor[1, 2, 3]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[1:2]
n = np_tensor[1:2]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[1:2, 2:3, 3:4]
n = np_tensor[1:2, 2:3, 3:4]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[1:2, None]
n = np_tensor[1:2, None]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[1:2, 2:3, ...]
n = np_tensor[1:2, 2:3, ...]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[1:2, ..., 3:4]
n = np_tensor[1:2, ..., 3:4]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[None, ..., 3:4, None]
n = np_tensor[None, ..., 3:4, None]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[1:2:None]
n = np_tensor[1:2:None]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[:, 2]
n = np_tensor[:, 2]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[None]
n = np_tensor[None]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[None, None]
n = np_tensor[None, None]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[...]
n = np_tensor[...]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[..., 1]
n = np_tensor[..., 1]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[..., 1, 2]
n = np_tensor[..., 1, 2]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[..., -1, 2]
n = np_tensor[..., -1, 2]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[..., -1:-2, 2]
n = np_tensor[..., -1:-2, 2]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[..., None, None]
n = np_tensor[..., None, None]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[None, ..., None]
n = np_tensor[None, ..., None]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[1, 2, None, ..., None]
n = np_tensor[1, 2, None, ..., None]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[None, ..., 1, 2]
n = np_tensor[None, ..., 1, 2]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[1, None, 2]
n = np_tensor[1, None, 2]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
index_tensor = ops.convert_to_tensor(np.array(1, dtype=np.int32))
t = tensor[index_tensor]
n = np_tensor[ops.convert_to_numpy(index_tensor)]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
index_tensor = ops.convert_to_tensor(np.array(1, dtype=np.int32))
t = tensor[index_tensor, 2, None]
n = np_tensor[ops.convert_to_numpy(index_tensor), 2, None]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
index_tensor = ops.convert_to_tensor(np.array(-2, dtype=np.int32))
t = tensor[index_tensor, 1]
n = np_tensor[ops.convert_to_numpy(index_tensor), 1]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
index_tensor = ops.convert_to_tensor(np.array(-1, dtype=np.int32))
t = tensor[-2, index_tensor]
n = np_tensor[-2, ops.convert_to_numpy(index_tensor)]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
# Negative indexing
t = tensor[-1]
n = np_tensor[-1]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[1, -1, -2]
n = np_tensor[1, -1, -2]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
# Slicing with step
t = tensor[::2]
n = np_tensor[::2]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
# Mixed slices and integers
t = tensor[1, :, 1:4]
n = np_tensor[1, :, 1:4]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
t = tensor[:, 1:2, 3]
n = np_tensor[:, 1:2, 3]
self.assertEqual(t.shape, n.shape)
self.assertAllClose(t, n)
def test_is_tensor(self):
np_x = np.array([[1, 2, 3], [3, 2, 1]])
x = backend.convert_to_tensor(np_x)
if backend.backend() != "numpy":
self.assertFalse(ops.is_tensor(np_x))
self.assertTrue(ops.is_tensor(x))
self.assertFalse(ops.is_tensor([1, 2, 3]))
def test_map(self):
def f(x):
return x**2
xs = np.arange(10)
self.assertAllClose(ops.map(f, xs), xs**2)
# Test nested output
def f2(x):
return {"a": x**2, "b": x * 10}
xs = np.random.rand(2, 3, 4).astype("float32")
outputs = ops.map(f2, xs)
self.assertAllClose(outputs["a"], xs**2)
self.assertAllClose(outputs["b"], xs * 10)
# Test with nested structures
def dict_input_fn(inputs):
x = inputs["x"][:, 0]
y = inputs["y"] + 1
return {"x": x, "y": y}
def list_input_fn(inputs):
return [x**2 for x in inputs]
xs = {
"x": ops.convert_to_tensor(
np.random.rand(4, 100, 3), dtype="float32"
),
"y": ops.convert_to_tensor(
np.random.randint(0, 10, size=(4, 1)), dtype="int32"
),
}
xs1 = [
ops.convert_to_tensor(np.random.rand(4, 100, 3), dtype="float32"),
ops.convert_to_tensor(
np.random.randint(0, 10, size=(4, 1)), dtype="int32"
),
]
ys = ops.map(dict_input_fn, xs)
self.assertEqual(ys["x"].shape, (4, 100))
self.assertEqual(
ops.convert_to_numpy(ys["y"]).all(),
ops.convert_to_numpy(xs["y"] + 1).all(),
)
ys = ops.map(list_input_fn, xs1)
for x, y in zip(xs1, ys):
self.assertEqual(
(ops.convert_to_numpy(y)).all(),
(ops.convert_to_numpy(x) ** 2).all(),
)
# Test Operation call.
xs = np.arange(10)
self.assertAllClose(ops.Map()(f, xs), xs**2)
def test_saturate_cast(self):
x = ops.ones((2,), dtype="float32")
y = ops.saturate_cast(x, "float16")
self.assertIn("float16", str(y.dtype))
x = ops.KerasTensor((2,), dtype="float32")
y = ops.saturate_cast(x, "float16")
self.assertEqual("float16", y.dtype)
self.assertEqual(x.shape, y.shape)
self.assertTrue(hasattr(y, "_keras_history"))
# Test Operation call.
x = np.array([-256, 1.0, 257.0], dtype="float32")
y = core.SaturateCast("uint8")(x)
self.assertDType(y, "uint8")
# Check that the values are the same
self.assertAllClose(y, np.clip(x, 0, 255).astype("uint8"))
def test_scan(self):
# Test cumsum
def cumsum(carry, xs):
carry = carry + xs
return carry, carry
init = np.array(0, dtype="float32")
xs = np.array([1, 2, 3, 4, 10, 20], dtype="float32")
carry, result = core.scan(cumsum, init, xs)
self.assertAllClose(carry, 40.0)
self.assertAllClose(result, ops.cumsum(xs))
# Test reverse=True
carry, result = core.scan(cumsum, init, xs, reverse=True)
self.assertAllClose(carry, 40.0)
self.assertAllClose(result, [40, 39, 37, 34, 30, 20])
# Test unroll
for unroll in (True, False, 2):
carry, result = core.scan(cumsum, init, xs, unroll=unroll)
self.assertAllClose(carry, 40.0)
self.assertAllClose(result, ops.cumsum(xs))
# Test xs is None
def fibonaccis(carry, _):
return (carry[1], carry[0] + carry[1]), None
init = (np.array(0, dtype="float32"), np.array(1, dtype="float32"))
carry, _ = core.scan(fibonaccis, init, length=6)
self.assertAllClose(carry, [8, 13])
# Test nested init
if backend.backend() != "tensorflow":
# tensorflow doesn't support arbitrary shape/dtype of the output of
# `f`. It must be the same as `init`.
def multiply_two(carry, _):
value1 = carry["value1"]
value2 = carry["value2"]
return (
{"value1": value1 * 2, "value2": value2 * 2},
value1 * 2 + value2 * 2,
)
init = {"value1": 2.0, "value2": 3.0}
carry, result = core.scan(multiply_two, init, length=3)
self.assertAllClose(carry["value1"], 16)
self.assertAllClose(carry["value2"], 24)
self.assertAllClose(result, [10, 20, 40])
# Test nested xs
def reduce_add(carry, xs):
value1 = xs["value1"]
value2 = xs["value2"]
return carry, value1 + value2
init = np.array(0, dtype="float32")
xs = {
"value1": np.array([1, 2, 3], dtype="float32"),
"value2": np.array([10, 20, 30], dtype="float32"),
}
_, result = core.scan(reduce_add, init, xs)
self.assertAllClose(result, [11, 22, 33])
# Test Operation call.
init = np.array(0, dtype="float32")
xs = np.array([1, 2, 3, 4, 10, 20], dtype="float32")
carry, result = core.Scan()(cumsum, init, xs)
self.assertAllClose(carry, 40.0)
self.assertAllClose(result, ops.cumsum(xs))
def test_scatter(self):
# Test 1D
indices = np.array([[1], [3], [4], [7]])
values = np.array([9, 10, 11, 12])
self.assertAllClose(
core.scatter(indices, values, (8,)),
[0, 9, 0, 10, 11, 0, 0, 12],
)
# Test 2D
indices = np.array([[0, 1], [2, 0]])
values = np.array([5, 10])
self.assertAllClose(
core.scatter(indices, values, (3, 2)), [[0, 5], [0, 0], [10, 0]]
)
# Test 3D
indices = np.array([[1], [3]])
values = np.array(
[
[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
]
)
self.assertAllClose(
core.scatter(indices, values, (4, 4, 4)),
[
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
],
)
# Test slices
indices = np.array([[2], [4]])
values = np.array([[1, 2, 3], [4, 5, 6]])
self.assertAllClose(
core.scatter(indices, values, (6, 3)),
[[0, 0, 0], [0, 0, 0], [1, 2, 3], [0, 0, 0], [4, 5, 6], [0, 0, 0]],
)
# Duplicate indices
indices = np.array([[0], [0]])
values = np.array([1, 1])
self.assertAllClose(core.scatter(indices, values, (1,)), [2])
# Test Operation call.
indices = np.array([[1, 0], [0, 1]])
values = np.array([10, 20])
shape = (2, 2)
self.assertAllClose(
core.Scatter(shape)(indices, values), np.array([[0, 20], [10, 0]])
)
def test_scatter_update(self):
# Test 1D.
inputs = np.array([0, 0, 0, 0, 0, 0, 0, 0])
indices = [[1], [3], [4], [7]]
updates = np.array([9, 10, 11, 12])
self.assertAllClose(
core.scatter_update(inputs, indices, updates),
[0, 9, 0, 10, 11, 0, 0, 12],
)
# Test 2D.
inputs = np.array([[1, 1], [1, 1], [1, 1]])
indices = [[0, 1], [2, 0]]
updates = np.array([5, 10])
self.assertAllClose(
core.scatter_update(inputs, indices, updates),
[[1, 5], [1, 1], [10, 1]],
)
# Test updates has multiple dimension.
inputs = np.ones([4, 4, 4])
indices = [[1, 1], [2, 2]]
updates = np.array([[0, 1, 2, 3], [3, 2, 1, 0]], dtype="float32")
outputs = core.scatter_update(inputs, indices, updates)
self.assertTrue(ops.is_tensor(outputs))
self.assertAllClose(outputs[1, 1, :], [0, 1, 2, 3])
self.assertAllClose(outputs[2, 2, :], [3, 2, 1, 0])
# Test Operation call.
inputs = np.array([[0, 0], [0, 0]])
indices = np.array([[1, 0], [0, 1]])
updates = np.array([10, 20])
self.assertAllClose(
core.ScatterUpdate()(inputs, indices, updates),
np.array([[0, 20], [10, 0]]),
)
def test_shape(self):
x = ops.ones((2, 3, 7, 1))
self.assertEqual(core.shape(x).__class__, tuple)
self.assertAllEqual(core.shape(x), (2, 3, 7, 1))
x = KerasTensor((None, 3, None, 1))
self.assertEqual(core.shape(x).__class__, tuple)
self.assertAllEqual(core.shape(x), (None, 3, None, 1))
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason=f"{backend.backend()} backend doesn't support sparse tensors.",
)
def test_shape_sparse(self):
if backend.backend() == "tensorflow":
import tensorflow as tf
x = tf.SparseTensor([[0, 0], [1, 2]], [1.0, 2.0], (2, 3))
elif backend.backend() == "jax":
import jax.experimental.sparse as jax_sparse
x = jax_sparse.BCOO(([1.0, 2.0], [[0, 0], [1, 2]]), shape=(2, 3))
else:
self.fail(f"Sparse is unsupported with backend {backend.backend()}")
self.assertAllEqual(core.shape(x), (2, 3))
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason=f"{backend.backend()} backend doesn't support ragged tensors.",
)
def test_shape_ragged(self):
import tensorflow as tf
x = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
self.assertAllEqual(core.shape(x), (5, None))
x = tf.RaggedTensor.from_row_lengths(tf.zeros([15, 2]), [4, 5, 6])
self.assertAllEqual(core.shape(x), (3, None, 2))
def test_slice(self):
# Test 1D.
inputs = np.arange(10)
start_indices = np.array([1])
shape = np.array([4])
self.assertAllClose(
core.slice(inputs, start_indices, shape),
[1, 2, 3, 4],
)
# Test 2D.
inputs = np.broadcast_to(np.arange(10), (4, 10))
start_indices = np.array([1, 1])
shape = np.array([2, 4])
self.assertAllClose(
core.slice(inputs, start_indices, shape),
[[1, 2, 3, 4], [1, 2, 3, 4]],
)
# Test N-D.
inputs = np.broadcast_to(np.arange(10), (4, 4, 4, 10))
start_indices = np.array([1, 1, 1, 1])
shape = np.array([1, 2, 3, 4])
outputs = core.slice(inputs, start_indices, shape)
expected = np.broadcast_to(np.arange(1, 5), (1, 2, 3, 4))
self.assertAllClose(outputs, expected)
# Test Operation call.
inputs = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
start_indices = np.array([1, 1])
shape = (2, 2)
self.assertAllClose(
core.Slice(shape)(inputs, start_indices), np.array([[5, 6], [8, 9]])
)
def test_slice_update(self):
# Test 1D.
inputs = np.array([0, 0, 0, 0, 0, 0, 0, 0])
start_indices = np.array([1])
updates = np.array([9, 10, 11, 12])
self.assertAllClose(
core.slice_update(inputs, start_indices, updates),
[0, 9, 10, 11, 12, 0, 0, 0],
)
# Test 2D.
inputs = np.array([[1, 1], [1, 1], [1, 1]])
start_indices = [1, 0]
updates = np.array([[2, 2], [2, 2]])
self.assertAllClose(
core.slice_update(inputs, start_indices, updates),
[[1, 1], [2, 2], [2, 2]],
)
# Test N-D.
inputs = np.ones([4, 4, 4, 4])
start_indices = [1, 1, 2, 2]
updates = np.zeros([2, 2, 2, 2])
outputs = core.slice_update(inputs, start_indices, updates)
self.assertAllClose(outputs[1:3, 1:3, 2:4, 2:4], np.zeros([2, 2, 2, 2]))
# Test Operation call.
inputs = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
start_indices = np.array([1, 1])
updates = np.array([[10, 11], [12, 13]])
self.assertAllClose(
core.SliceUpdate()(inputs, start_indices, updates),
np.array([[1, 2, 3], [4, 10, 11], [7, 12, 13]]),
)
@pytest.mark.requires_trainable_backend
def test_stop_gradient(self):
class ExampleLayer(layers.Layer):
def __init__(self):
super().__init__()
self.w = self.add_weight(shape=(1,), initializer="zeros")
self.b = self.add_weight(shape=(1,), initializer="zeros")
def call(self, x, training=False):
return ops.add(
ops.multiply(x, ops.stop_gradient(self.w)), self.b
)
model = models.Sequential([ExampleLayer()])
model.compile(
optimizer=optimizers.SGD(), loss=losses.MeanSquaredError()
)
rng = np.random.default_rng(0)
x = np.ones((2, 4), dtype="float32")
y = rng.standard_normal((2, 4), dtype="float32")
model.fit(x, y, epochs=1, batch_size=2)
self.assertEqual(model.layers[0].w.numpy(), 0.0)
self.assertNotEqual(model.layers[0].b.numpy(), 0.0)
def test_stop_gradient_no_fit(self):
x = ops.random.uniform(shape=(2, 4), dtype="float32")
y = ops.stop_gradient(x)
self.assertAllClose(x, y)
# Functional.
a = layers.Input(shape=(2,))
b = layers.Dense(4, kernel_initializer="ones", use_bias=False)(a)
c = layers.Dense(4, kernel_initializer="ones", use_bias=False)(b)
d = ops.stop_gradient(b) + c
model = models.Model(inputs=a, outputs=d)
output = model(ops.convert_to_tensor([[1.0, 2.0]]))
self.assertAllClose(output, 15.0)
# Test Operation call.
variable = ops.convert_to_tensor(
np.array([1.0, 2.0, 3.0], dtype="float32")
)
self.assertAllClose(core.StopGradient()(variable), variable)
def test_switch(self):
def fn1(x, y):
return x + y
def fn2(x, y):
return x - y
x = np.random.rand(2, 3, 4).astype("float32")
y = np.random.rand(2, 3, 4).astype("float32")
branches = [fn1, fn2]
self.assertAllClose(core.switch(0, branches, x, y), x + y)
self.assertAllClose(core.switch(1, branches, x, y), x - y)
# Test out-of-bound index
self.assertAllClose(core.switch(-100, branches, x, y), x + y)
self.assertAllClose(core.switch(100, branches, x, y), x - y)
# Test Operation call.
self.assertAllClose(core.Switch()(0, branches, x, y), x + y)
self.assertAllClose(core.Switch()(1, branches, x, y), x - y)
def test_vectorized_map(self):
def fn(x):
return x + 1
output = ops.vectorized_map(fn, ops.zeros((2, 3), dtype="float32"))
self.assertAllClose(backend.convert_to_numpy(output), np.ones((2, 3)))
def fn(x):
return ops.stack([x, x])
output = ops.vectorized_map(fn, ops.zeros((2, 3), dtype="float32"))
self.assertAllClose(
backend.convert_to_numpy(output), np.zeros((2, 2, 3))
)
# Case: multiple args
def fn(elems):
x, y = elems
return x + y
output = ops.vectorized_map(fn, [ops.ones((2, 3)), ops.ones((2, 3))])
self.assertAllClose(output, 2 * np.ones((2, 3)))
@parameterized.named_parameters(
[
{
"testcase_name": "scalar_data_with_max",
"loop_vars": np.array(0),
"expected_output": np.array(5),
"maximum_iterations": 5,
},
{
"testcase_name": "scalar_data_no_max",
"loop_vars": np.array(0),
"expected_output": np.array(10),
"maximum_iterations": None,
},
{
"testcase_name": "nested_data_with_max",
"loop_vars": {
"a": np.array(0),
"b": (np.array(1), np.array(2)),
},
"expected_output": {
"a": np.array(5),
"b": (np.array(6), np.array(7)),
},
"maximum_iterations": 5,
},
{
"testcase_name": "nested_data_no_max",
"loop_vars": {
"a": np.array(0),
"b": (np.array(1), np.array(2)),
},
"expected_output": {
"a": np.array(10),
"b": (np.array(11), np.array(12)),
},
"maximum_iterations": None,
},
]
)
def test_while_loop(self, loop_vars, expected_output, maximum_iterations):
def cond(args):
return tree.flatten(args)[0] < 10
def body(args):
return tree.map_structure(lambda x: x + 1, args)
output = core.while_loop(
cond, body, loop_vars, maximum_iterations=maximum_iterations
)
tree.map_structure(self.assertAllClose, output, expected_output)
# Test Operation call.
output = core.WhileLoop(
cond, body, maximum_iterations=maximum_iterations
)(loop_vars)
tree.map_structure(self.assertAllClose, output, expected_output)
@parameterized.named_parameters(
[
{
"testcase_name": "with_max",
"state": (np.array(0), np.array(1)),
"output": (np.array(5), np.array(6)),
"maximum_iterations": 5,
},
{
"testcase_name": "no_max",
"state": (np.array(0), np.array(1)),
"output": (np.array(10), np.array(11)),
"maximum_iterations": None,
},
]
)
def test_while_loop_list_data(self, state, output, maximum_iterations):
def cond(*args):
return tree.flatten(args)[0] < 10
def body(*args):
return tree.map_structure(lambda x: x + 1, args)
state = core.while_loop(
cond, body, state, maximum_iterations=maximum_iterations
)
tree.map_structure(self.assertAllClose, state, output)
def test_unstack(self):
rng = np.random.default_rng(0)
x = rng.uniform(size=(2, 3, 4))
x_tensor = ops.convert_to_tensor(x)
axis = 1
out = ops.unstack(x_tensor, axis=axis)
out_ex = [x[:, i, :] for i in range(x.shape[axis])]
self.assertEqual(len(out), len(out_ex))
for o, o_e in zip(out, out_ex):
o = ops.convert_to_numpy(o)
self.assertAllClose(o, o_e)
# Test Operation call.
out = ops.Unstack(axis=axis)(x_tensor)
self.assertEqual(len(out), len(out_ex))
for o, o_e in zip(out, out_ex):
o = ops.convert_to_numpy(o)
self.assertAllClose(o, o_e)
| CoreOpsCorrectnessTest |
python | encode__django-rest-framework | tests/test_requests_client.py | {
"start": 541,
"end": 1223
} | class ____(APIView):
def get(self, request):
return Response({
'method': request.method,
'query_params': request.query_params,
})
def post(self, request):
files = {
key: (value.name, value.read())
for key, value in request.FILES.items()
}
post = request.POST
json = None
if request.META.get('CONTENT_TYPE') == 'application/json':
json = request.data
return Response({
'method': request.method,
'query_params': request.query_params,
'POST': post,
'FILES': files,
'JSON': json
})
| Root |
python | psf__black | src/black/trans.py | {
"start": 71246,
"end": 87598
} | class ____(BaseStringSplitter, CustomSplitMapMixin):
"""
StringTransformer that wraps strings in parens and then splits at the LPAR.
Requirements:
All of the requirements listed in BaseStringSplitter's docstring in
addition to the requirements listed below:
* The line is a return/yield statement, which returns/yields a string.
OR
* The line is part of a ternary expression (e.g. `x = y if cond else
z`) such that the line starts with `else <string>`, where <string> is
some string.
OR
* The line is an assert statement, which ends with a string.
OR
* The line is an assignment statement (e.g. `x = <string>` or `x +=
<string>`) such that the variable is being assigned the value of some
string.
OR
* The line is a dictionary key assignment where some valid key is being
assigned the value of some string.
OR
* The line is an lambda expression and the value is a string.
OR
* The line starts with an "atom" string that prefers to be wrapped in
parens. It's preferred to be wrapped when it's is an immediate child of
a list/set/tuple literal, AND the string is surrounded by commas (or is
the first/last child).
Transformations:
The chosen string is wrapped in parentheses and then split at the LPAR.
We then have one line which ends with an LPAR and another line that
starts with the chosen string. The latter line is then split again at
the RPAR. This results in the RPAR (and possibly a trailing comma)
being placed on its own line.
NOTE: If any leaves exist to the right of the chosen string (except
for a trailing comma, which would be placed after the RPAR), those
leaves are placed inside the parentheses. In effect, the chosen
string is not necessarily being "wrapped" by parentheses. We can,
however, count on the LPAR being placed directly before the chosen
string.
In other words, StringParenWrapper creates "atom" strings. These
can then be split again by StringSplitter, if necessary.
Collaborations:
In the event that a string line split by StringParenWrapper is
changed such that it no longer needs to be given its own line,
StringParenWrapper relies on StringParenStripper to clean up the
parentheses it created.
For "atom" strings that prefers to be wrapped in parens, it requires
StringSplitter to hold the split until the string is wrapped in parens.
"""
def do_splitter_match(self, line: Line) -> TMatchResult:
LL = line.leaves
if line.leaves[-1].type in OPENING_BRACKETS:
return TErr(
"Cannot wrap parens around a line that ends in an opening bracket."
)
string_idx = (
self._return_match(LL)
or self._else_match(LL)
or self._assert_match(LL)
or self._assign_match(LL)
or self._dict_or_lambda_match(LL)
or self._prefer_paren_wrap_match(LL)
)
if string_idx is not None:
string_value = line.leaves[string_idx].value
# If the string has neither spaces nor East Asian stops...
if not any(
char == " " or char in SPLIT_SAFE_CHARS for char in string_value
):
# And will still violate the line length limit when split...
max_string_width = self.line_length - ((line.depth + 1) * 4)
if str_width(string_value) > max_string_width:
# And has no associated custom splits...
if not self.has_custom_splits(string_value):
# Then we should NOT put this string on its own line.
return TErr(
"We do not wrap long strings in parentheses when the"
" resultant line would still be over the specified line"
" length and can't be split further by StringSplitter."
)
return Ok([string_idx])
return TErr("This line does not contain any non-atomic strings.")
@staticmethod
def _return_match(LL: list[Leaf]) -> int | None:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
matched) string, if this line matches the return/yield statement
requirements listed in the 'Requirements' section of this classes'
docstring.
OR
None, otherwise.
"""
# If this line is a part of a return/yield statement and the first leaf
# contains either the "return" or "yield" keywords...
if parent_type(LL[0]) in [syms.return_stmt, syms.yield_expr] and LL[
0
].value in ["return", "yield"]:
is_valid_index = is_valid_index_factory(LL)
idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
# The next visible leaf MUST contain a string...
if is_valid_index(idx) and LL[idx].type == token.STRING:
return idx
return None
@staticmethod
def _else_match(LL: list[Leaf]) -> int | None:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
matched) string, if this line matches the ternary expression
requirements listed in the 'Requirements' section of this classes'
docstring.
OR
None, otherwise.
"""
# If this line is a part of a ternary expression and the first leaf
# contains the "else" keyword...
if (
parent_type(LL[0]) == syms.test
and LL[0].type == token.NAME
and LL[0].value == "else"
):
is_valid_index = is_valid_index_factory(LL)
idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
# The next visible leaf MUST contain a string...
if is_valid_index(idx) and LL[idx].type == token.STRING:
return idx
return None
@staticmethod
def _assert_match(LL: list[Leaf]) -> int | None:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
matched) string, if this line matches the assert statement
requirements listed in the 'Requirements' section of this classes'
docstring.
OR
None, otherwise.
"""
# If this line is a part of an assert statement and the first leaf
# contains the "assert" keyword...
if parent_type(LL[0]) == syms.assert_stmt and LL[0].value == "assert":
is_valid_index = is_valid_index_factory(LL)
for i, leaf in enumerate(LL):
# We MUST find a comma...
if leaf.type == token.COMMA:
idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
# That comma MUST be followed by a string...
if is_valid_index(idx) and LL[idx].type == token.STRING:
string_idx = idx
# Skip the string trailer, if one exists.
string_parser = StringParser()
idx = string_parser.parse(LL, string_idx)
# But no more leaves are allowed...
if not is_valid_index(idx):
return string_idx
return None
@staticmethod
def _assign_match(LL: list[Leaf]) -> int | None:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
matched) string, if this line matches the assignment statement
requirements listed in the 'Requirements' section of this classes'
docstring.
OR
None, otherwise.
"""
# If this line is a part of an expression statement or is a function
# argument AND the first leaf contains a variable name...
if (
parent_type(LL[0]) in [syms.expr_stmt, syms.argument, syms.power]
and LL[0].type == token.NAME
):
is_valid_index = is_valid_index_factory(LL)
for i, leaf in enumerate(LL):
# We MUST find either an '=' or '+=' symbol...
if leaf.type in [token.EQUAL, token.PLUSEQUAL]:
idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
# That symbol MUST be followed by a string...
if is_valid_index(idx) and LL[idx].type == token.STRING:
string_idx = idx
# Skip the string trailer, if one exists.
string_parser = StringParser()
idx = string_parser.parse(LL, string_idx)
# The next leaf MAY be a comma iff this line is a part
# of a function argument...
if (
parent_type(LL[0]) == syms.argument
and is_valid_index(idx)
and LL[idx].type == token.COMMA
):
idx += 1
# But no more leaves are allowed...
if not is_valid_index(idx):
return string_idx
return None
@staticmethod
def _dict_or_lambda_match(LL: list[Leaf]) -> int | None:
"""
Returns:
string_idx such that @LL[string_idx] is equal to our target (i.e.
matched) string, if this line matches the dictionary key assignment
statement or lambda expression requirements listed in the
'Requirements' section of this classes' docstring.
OR
None, otherwise.
"""
# If this line is a part of a dictionary key assignment or lambda expression...
parent_types = [parent_type(LL[0]), parent_type(LL[0].parent)]
if syms.dictsetmaker in parent_types or syms.lambdef in parent_types:
is_valid_index = is_valid_index_factory(LL)
for i, leaf in enumerate(LL):
# We MUST find a colon, it can either be dict's or lambda's colon...
if leaf.type == token.COLON and i < len(LL) - 1:
idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
# That colon MUST be followed by a string...
if is_valid_index(idx) and LL[idx].type == token.STRING:
string_idx = idx
# Skip the string trailer, if one exists.
string_parser = StringParser()
idx = string_parser.parse(LL, string_idx)
# That string MAY be followed by a comma...
if is_valid_index(idx) and LL[idx].type == token.COMMA:
idx += 1
# But no more leaves are allowed...
if not is_valid_index(idx):
return string_idx
return None
def do_transform(
self, line: Line, string_indices: list[int]
) -> Iterator[TResult[Line]]:
LL = line.leaves
assert len(string_indices) == 1, (
f"{self.__class__.__name__} should only find one match at a time, found"
f" {len(string_indices)}"
)
string_idx = string_indices[0]
is_valid_index = is_valid_index_factory(LL)
insert_str_child = insert_str_child_factory(LL[string_idx])
comma_idx = -1
ends_with_comma = False
if LL[comma_idx].type == token.COMMA:
ends_with_comma = True
leaves_to_steal_comments_from = [LL[string_idx]]
if ends_with_comma:
leaves_to_steal_comments_from.append(LL[comma_idx])
# --- First Line
first_line = line.clone()
left_leaves = LL[:string_idx]
# We have to remember to account for (possibly invisible) LPAR and RPAR
# leaves that already wrapped the target string. If these leaves do
# exist, we will replace them with our own LPAR and RPAR leaves.
old_parens_exist = False
if left_leaves and left_leaves[-1].type == token.LPAR:
old_parens_exist = True
leaves_to_steal_comments_from.append(left_leaves[-1])
left_leaves.pop()
append_leaves(first_line, line, left_leaves)
lpar_leaf = Leaf(token.LPAR, "(")
if old_parens_exist:
replace_child(LL[string_idx - 1], lpar_leaf)
else:
insert_str_child(lpar_leaf)
first_line.append(lpar_leaf)
# We throw inline comments that were originally to the right of the
# target string to the top line. They will now be shown to the right of
# the LPAR.
for leaf in leaves_to_steal_comments_from:
for comment_leaf in line.comments_after(leaf):
first_line.append(comment_leaf, preformatted=True)
yield Ok(first_line)
# --- Middle (String) Line
# We only need to yield one (possibly too long) string line, since the
# `StringSplitter` will break it down further if necessary.
string_value = LL[string_idx].value
string_line = Line(
mode=line.mode,
depth=line.depth + 1,
inside_brackets=True,
should_split_rhs=line.should_split_rhs,
magic_trailing_comma=line.magic_trailing_comma,
)
string_leaf = Leaf(token.STRING, string_value)
insert_str_child(string_leaf)
string_line.append(string_leaf)
old_rpar_leaf = None
if is_valid_index(string_idx + 1):
right_leaves = LL[string_idx + 1 :]
if ends_with_comma:
right_leaves.pop()
if old_parens_exist:
assert right_leaves and right_leaves[-1].type == token.RPAR, (
"Apparently, old parentheses do NOT exist?!"
f" (left_leaves={left_leaves}, right_leaves={right_leaves})"
)
old_rpar_leaf = right_leaves.pop()
elif right_leaves and right_leaves[-1].type == token.RPAR:
# Special case for lambda expressions as dict's value, e.g.:
# my_dict = {
# "key": lambda x: f"formatted: {x}",
# }
# After wrapping the dict's value with parentheses, the string is
# followed by a RPAR but its opening bracket is lambda's, not
# the string's:
# "key": (lambda x: f"formatted: {x}"),
opening_bracket = right_leaves[-1].opening_bracket
if opening_bracket is not None and opening_bracket in left_leaves:
index = left_leaves.index(opening_bracket)
if (
0 < index < len(left_leaves) - 1
and left_leaves[index - 1].type == token.COLON
and left_leaves[index + 1].value == "lambda"
):
right_leaves.pop()
append_leaves(string_line, line, right_leaves)
yield Ok(string_line)
# --- Last Line
last_line = line.clone()
last_line.bracket_tracker = first_line.bracket_tracker
new_rpar_leaf = Leaf(token.RPAR, ")")
if old_rpar_leaf is not None:
replace_child(old_rpar_leaf, new_rpar_leaf)
else:
insert_str_child(new_rpar_leaf)
last_line.append(new_rpar_leaf)
# If the target string ended with a comma, we place this comma to the
# right of the RPAR on the last line.
if ends_with_comma:
comma_leaf = Leaf(token.COMMA, ",")
replace_child(LL[comma_idx], comma_leaf)
last_line.append(comma_leaf)
yield Ok(last_line)
| StringParenWrapper |
python | Lightning-AI__lightning | tests/tests_pytorch/checkpointing/test_model_checkpoint.py | {
"start": 36862,
"end": 37077
} | class ____(BoringModel):
def on_validation_epoch_start(self):
if not self.trainer.sanity_checking and self.current_epoch == 1:
raise RuntimeError("Trouble!")
| TroubledModelOnValidationEpochStart |
python | facebookresearch__faiss | faiss/gpu/test/test_gpu_index_refs.py | {
"start": 937,
"end": 2881
} | class ____(unittest.TestCase):
d = 32
nv = 1000
nlist = 10
res = faiss.StandardGpuResources() # pyre-ignore
db = np.random.rand(nv, d)
# These GPU classes reference another index.
# This tests to make sure the deletion of the other index
# does not cause a crash.
def test_GpuIndexIVFFlat(self):
index_to_delete = faiss.IndexIVFFlat(
faiss.IndexFlat(self.d), self.d, self.nlist
)
idx = faiss.GpuIndexIVFFlat(
self.res, index_to_delete, faiss.GpuIndexIVFFlatConfig()
)
do_multi_test(idx, index_to_delete, self.db)
def test_GpuIndexBinaryFlat(self):
ds = SyntheticDataset(64, 1000, 1000, 200)
index_to_delete = faiss.IndexBinaryFlat(ds.d)
idx = faiss.GpuIndexBinaryFlat(self.res, index_to_delete)
tobinary = faiss.index_factory(ds.d, "LSHrt")
tobinary.train(ds.get_train())
xb = tobinary.sa_encode(ds.get_database())
do_multi_test(idx, index_to_delete, xb)
def test_GpuIndexFlat(self):
index_to_delete = faiss.IndexFlat(self.d, faiss.METRIC_L2)
idx = faiss.GpuIndexFlat(self.res, index_to_delete)
do_multi_test(idx, index_to_delete, self.db)
def test_GpuIndexIVFPQ(self):
index_to_delete = faiss.IndexIVFPQ(
faiss.IndexFlatL2(self.d),
self.d, self.nlist, 2, 8)
idx = faiss.GpuIndexIVFPQ(self.res, index_to_delete)
do_multi_test(idx, index_to_delete, self.db)
def test_GpuIndexIVFScalarQuantizer(self):
index_to_delete = faiss.IndexIVFScalarQuantizer(
faiss.IndexFlat(self.d, faiss.METRIC_L2),
self.d,
self.nlist,
faiss.ScalarQuantizer.QT_8bit_direct,
faiss.METRIC_L2,
False
)
idx = faiss.GpuIndexIVFScalarQuantizer(self.res, index_to_delete)
do_multi_test(idx, index_to_delete, self.db)
| TestRefs |
python | doocs__leetcode | solution/1200-1299/1201.Ugly Number III/Solution.py | {
"start": 0,
"end": 592
} | class ____:
def nthUglyNumber(self, n: int, a: int, b: int, c: int) -> int:
ab = lcm(a, b)
bc = lcm(b, c)
ac = lcm(a, c)
abc = lcm(a, b, c)
l, r = 1, 2 * 10**9
while l < r:
mid = (l + r) >> 1
if (
mid // a
+ mid // b
+ mid // c
- mid // ab
- mid // bc
- mid // ac
+ mid // abc
>= n
):
r = mid
else:
l = mid + 1
return l
| Solution |
python | pypa__pip | src/pip/_vendor/distlib/scripts.py | {
"start": 3009,
"end": 18609
} | class ____(object):
"""
A class to copy or create scripts from source scripts or callable
specifications.
"""
script_template = SCRIPT_TEMPLATE
executable = None # for shebangs
def __init__(self, source_dir, target_dir, add_launchers=True, dry_run=False, fileop=None):
self.source_dir = source_dir
self.target_dir = target_dir
self.add_launchers = add_launchers
self.force = False
self.clobber = False
# It only makes sense to set mode bits on POSIX.
self.set_mode = (os.name == 'posix') or (os.name == 'java' and os._name == 'posix')
self.variants = set(('', 'X.Y'))
self._fileop = fileop or FileOperator(dry_run)
self._is_nt = os.name == 'nt' or (os.name == 'java' and os._name == 'nt')
self.version_info = sys.version_info
def _get_alternate_executable(self, executable, options):
if options.get('gui', False) and self._is_nt: # pragma: no cover
dn, fn = os.path.split(executable)
fn = fn.replace('python', 'pythonw')
executable = os.path.join(dn, fn)
return executable
if sys.platform.startswith('java'): # pragma: no cover
def _is_shell(self, executable):
"""
Determine if the specified executable is a script
(contains a #! line)
"""
try:
with open(executable) as fp:
return fp.read(2) == '#!'
except (OSError, IOError):
logger.warning('Failed to open %s', executable)
return False
def _fix_jython_executable(self, executable):
if self._is_shell(executable):
# Workaround for Jython is not needed on Linux systems.
import java
if java.lang.System.getProperty('os.name') == 'Linux':
return executable
elif executable.lower().endswith('jython.exe'):
# Use wrapper exe for Jython on Windows
return executable
return '/usr/bin/env %s' % executable
def _build_shebang(self, executable, post_interp):
"""
Build a shebang line. In the simple case (on Windows, or a shebang line
which is not too long or contains spaces) use a simple formulation for
the shebang. Otherwise, use /bin/sh as the executable, with a contrived
shebang which allows the script to run either under Python or sh, using
suitable quoting. Thanks to Harald Nordgren for his input.
See also: http://www.in-ulm.de/~mascheck/various/shebang/#length
https://hg.mozilla.org/mozilla-central/file/tip/mach
"""
if os.name != 'posix':
simple_shebang = True
elif getattr(sys, "cross_compiling", False):
# In a cross-compiling environment, the shebang will likely be a
# script; this *must* be invoked with the "safe" version of the
# shebang, or else using os.exec() to run the entry script will
# fail, raising "OSError 8 [Errno 8] Exec format error".
simple_shebang = False
else:
# Add 3 for '#!' prefix and newline suffix.
shebang_length = len(executable) + len(post_interp) + 3
if sys.platform == 'darwin':
max_shebang_length = 512
else:
max_shebang_length = 127
simple_shebang = ((b' ' not in executable) and (shebang_length <= max_shebang_length))
if simple_shebang:
result = b'#!' + executable + post_interp + b'\n'
else:
result = b'#!/bin/sh\n'
result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n'
result += b"' '''\n"
return result
def _get_shebang(self, encoding, post_interp=b'', options=None):
enquote = True
if self.executable:
executable = self.executable
enquote = False # assume this will be taken care of
elif not sysconfig.is_python_build():
executable = get_executable()
elif in_venv(): # pragma: no cover
executable = os.path.join(sysconfig.get_path('scripts'), 'python%s' % sysconfig.get_config_var('EXE'))
else: # pragma: no cover
if os.name == 'nt':
# for Python builds from source on Windows, no Python executables with
# a version suffix are created, so we use python.exe
executable = os.path.join(sysconfig.get_config_var('BINDIR'),
'python%s' % (sysconfig.get_config_var('EXE')))
else:
executable = os.path.join(
sysconfig.get_config_var('BINDIR'),
'python%s%s' % (sysconfig.get_config_var('VERSION'), sysconfig.get_config_var('EXE')))
if options:
executable = self._get_alternate_executable(executable, options)
if sys.platform.startswith('java'): # pragma: no cover
executable = self._fix_jython_executable(executable)
# Normalise case for Windows - COMMENTED OUT
# executable = os.path.normcase(executable)
# N.B. The normalising operation above has been commented out: See
# issue #124. Although paths in Windows are generally case-insensitive,
# they aren't always. For example, a path containing a ẞ (which is a
# LATIN CAPITAL LETTER SHARP S - U+1E9E) is normcased to ß (which is a
# LATIN SMALL LETTER SHARP S' - U+00DF). The two are not considered by
# Windows as equivalent in path names.
# If the user didn't specify an executable, it may be necessary to
# cater for executable paths with spaces (not uncommon on Windows)
if enquote:
executable = enquote_executable(executable)
# Issue #51: don't use fsencode, since we later try to
# check that the shebang is decodable using utf-8.
executable = executable.encode('utf-8')
# in case of IronPython, play safe and enable frames support
if (sys.platform == 'cli' and '-X:Frames' not in post_interp and
'-X:FullFrames' not in post_interp): # pragma: no cover
post_interp += b' -X:Frames'
shebang = self._build_shebang(executable, post_interp)
# Python parser starts to read a script using UTF-8 until
# it gets a #coding:xxx cookie. The shebang has to be the
# first line of a file, the #coding:xxx cookie cannot be
# written before. So the shebang has to be decodable from
# UTF-8.
try:
shebang.decode('utf-8')
except UnicodeDecodeError: # pragma: no cover
raise ValueError('The shebang (%r) is not decodable from utf-8' % shebang)
# If the script is encoded to a custom encoding (use a
# #coding:xxx cookie), the shebang has to be decodable from
# the script encoding too.
if encoding != 'utf-8':
try:
shebang.decode(encoding)
except UnicodeDecodeError: # pragma: no cover
raise ValueError('The shebang (%r) is not decodable '
'from the script encoding (%r)' % (shebang, encoding))
return shebang
def _get_script_text(self, entry):
return self.script_template % dict(
module=entry.prefix, import_name=entry.suffix.split('.')[0], func=entry.suffix)
manifest = _DEFAULT_MANIFEST
def get_manifest(self, exename):
base = os.path.basename(exename)
return self.manifest % base
def _write_script(self, names, shebang, script_bytes, filenames, ext):
use_launcher = self.add_launchers and self._is_nt
if not use_launcher:
script_bytes = shebang + script_bytes
else: # pragma: no cover
if ext == 'py':
launcher = self._get_launcher('t')
else:
launcher = self._get_launcher('w')
stream = BytesIO()
with ZipFile(stream, 'w') as zf:
source_date_epoch = os.environ.get('SOURCE_DATE_EPOCH')
if source_date_epoch:
date_time = time.gmtime(int(source_date_epoch))[:6]
zinfo = ZipInfo(filename='__main__.py', date_time=date_time)
zf.writestr(zinfo, script_bytes)
else:
zf.writestr('__main__.py', script_bytes)
zip_data = stream.getvalue()
script_bytes = launcher + shebang + zip_data
for name in names:
outname = os.path.join(self.target_dir, name)
if use_launcher: # pragma: no cover
n, e = os.path.splitext(outname)
if e.startswith('.py'):
outname = n
outname = '%s.exe' % outname
try:
self._fileop.write_binary_file(outname, script_bytes)
except Exception:
# Failed writing an executable - it might be in use.
logger.warning('Failed to write executable - trying to '
'use .deleteme logic')
dfname = '%s.deleteme' % outname
if os.path.exists(dfname):
os.remove(dfname) # Not allowed to fail here
os.rename(outname, dfname) # nor here
self._fileop.write_binary_file(outname, script_bytes)
logger.debug('Able to replace executable using '
'.deleteme logic')
try:
os.remove(dfname)
except Exception:
pass # still in use - ignore error
else:
if self._is_nt and not outname.endswith('.' + ext): # pragma: no cover
outname = '%s.%s' % (outname, ext)
if os.path.exists(outname) and not self.clobber:
logger.warning('Skipping existing file %s', outname)
continue
self._fileop.write_binary_file(outname, script_bytes)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
variant_separator = '-'
def get_script_filenames(self, name):
result = set()
if '' in self.variants:
result.add(name)
if 'X' in self.variants:
result.add('%s%s' % (name, self.version_info[0]))
if 'X.Y' in self.variants:
result.add('%s%s%s.%s' % (name, self.variant_separator, self.version_info[0], self.version_info[1]))
return result
def _make_script(self, entry, filenames, options=None):
post_interp = b''
if options:
args = options.get('interpreter_args', [])
if args:
args = ' %s' % ' '.join(args)
post_interp = args.encode('utf-8')
shebang = self._get_shebang('utf-8', post_interp, options=options)
script = self._get_script_text(entry).encode('utf-8')
scriptnames = self.get_script_filenames(entry.name)
if options and options.get('gui', False):
ext = 'pyw'
else:
ext = 'py'
self._write_script(scriptnames, shebang, script, filenames, ext)
def _copy_script(self, script, filenames):
adjust = False
script = os.path.join(self.source_dir, convert_path(script))
outname = os.path.join(self.target_dir, os.path.basename(script))
if not self.force and not self._fileop.newer(script, outname):
logger.debug('not copying %s (up-to-date)', script)
return
# Always open the file, but ignore failures in dry-run mode --
# that way, we'll get accurate feedback if we can read the
# script.
try:
f = open(script, 'rb')
except IOError: # pragma: no cover
if not self.dry_run:
raise
f = None
else:
first_line = f.readline()
if not first_line: # pragma: no cover
logger.warning('%s is an empty file (skipping)', script)
return
match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
if match:
adjust = True
post_interp = match.group(1) or b''
if not adjust:
if f:
f.close()
self._fileop.copy_file(script, outname)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
else:
logger.info('copying and adjusting %s -> %s', script, self.target_dir)
if not self._fileop.dry_run:
encoding, lines = detect_encoding(f.readline)
f.seek(0)
shebang = self._get_shebang(encoding, post_interp)
if b'pythonw' in first_line: # pragma: no cover
ext = 'pyw'
else:
ext = 'py'
n = os.path.basename(outname)
self._write_script([n], shebang, f.read(), filenames, ext)
if f:
f.close()
@property
def dry_run(self):
return self._fileop.dry_run
@dry_run.setter
def dry_run(self, value):
self._fileop.dry_run = value
if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): # pragma: no cover
# Executable launcher support.
# Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
def _get_launcher(self, kind):
if struct.calcsize('P') == 8: # 64-bit
bits = '64'
else:
bits = '32'
platform_suffix = '-arm' if get_platform() == 'win-arm64' else ''
name = '%s%s%s.exe' % (kind, bits, platform_suffix)
if name not in WRAPPERS:
msg = ('Unable to find resource %s in package %s' %
(name, DISTLIB_PACKAGE))
raise ValueError(msg)
return WRAPPERS[name]
# Public API follows
def make(self, specification, options=None):
"""
Make a script.
:param specification: The specification, which is either a valid export
entry specification (to make a script from a
callable) or a filename (to make a script by
copying from a source location).
:param options: A dictionary of options controlling script generation.
:return: A list of all absolute pathnames written to.
"""
filenames = []
entry = get_export_entry(specification)
if entry is None:
self._copy_script(specification, filenames)
else:
self._make_script(entry, filenames, options=options)
return filenames
def make_multiple(self, specifications, options=None):
"""
Take a list of specifications and make scripts from them,
:param specifications: A list of specifications.
:return: A list of all absolute pathnames written to,
"""
filenames = []
for specification in specifications:
filenames.extend(self.make(specification, options))
return filenames
| ScriptMaker |
python | getsentry__sentry | src/sentry/replays/data_export.py | {
"start": 2056,
"end": 26066
} | class ____(Protocol):
def __call__(self, limit: int, offset: int) -> Request: ...
def rows_to_csv(rows: list[dict[str, Any]]) -> str:
buf = io.StringIO()
writer = csv.writer(buf)
for i, row in enumerate(rows):
if i == 0:
writer.writerow(row.keys())
writer.writerow(row.values())
return buf.getvalue()
def export_clickhouse_rows(
query_fn: QueryFnProtocol,
referrer: str = Referrer.EU_DATA_EXPORT.value,
num_pages: int = EXPORT_QUERY_PAGES_PER_TASK,
limit: int = EXPORT_QUERY_ROWS_PER_PAGE,
offset: int = 0,
) -> Generator[dict[str, Any]]:
"""
ClickHouse row export.
:param query_fn: Any function which returns a request which is paginatable by limit and offset.
:param referrer: A unique identifier for a given data-export query.
:param limit: The number of rows to limit the query by.
:param offset: The initial offset value to offset the query by.
:param num_pages: The maximum number of pages we'll query before exiting. The number of pages
we query is intentionally capped. This ensures termination and encourages appropriate
bounding by the calling function.
:param max_retries: The maximum number of queries we'll make to the database before quitting.
:param retry_after_seconds: The number of seconds to wait after each query failure.
"""
assert limit > 0, "limit must be a positive integer greater than zero."
assert num_pages > 0, "num_pages must be a positive integer greater than zero."
assert offset >= 0, "offset must be a positive integer greater than or equal to zero."
# Iteration is capped to a maximum number of pages. This ensures termination and encourages
# appropriate bounding by the calling function. Ideally this export is ran in an asynchonrous
# task. Tasks typically have a deadline so iterating forever is undesireable. Each task should
# process a chunk of data commit it (and perhaps its progress) and then schedule another task
# to complete the remainder of the job which itself is bounded.
for _ in range(num_pages):
results = raw_snql_query(query_fn(limit=limit, offset=offset), referrer)["data"]
if results:
yield from results
offset += len(results)
if len(results) != limit:
break
# $$$$$$\ $$$$$$\ $$$$$$\
# $$ __$$\ $$ __$$\ $$ __$$\
# $$ / \__|$$ / \__|$$ / \__|
# $$ |$$$$\ $$ | \$$$$$$\
# $$ |\_$$ |$$ | \____$$\
# $$ | $$ |$$ | $$\ $$\ $$ |
# \$$$$$$ |\$$$$$$ |\$$$$$$ |
# \______/ \______/ \______/
def request_create_transfer_job(request: CreateTransferJobRequest) -> TransferJob:
client = storage_transfer_v1.StorageTransferServiceClient()
return client.create_transfer_job(request)
def create_transfer_job[T](
gcp_project_id: str,
transfer_job_name: str | None,
source_bucket: str,
source_prefix: str,
destination_bucket: str,
destination_prefix: str,
job_description: str,
do_create_transfer_job: Callable[[CreateTransferJobRequest], T],
notification_topic: str | None = None,
get_current_datetime: Callable[[], datetime] = lambda: datetime.now(tz=timezone.utc),
) -> T:
"""
Create a transfer-job which copies a bucket by prefix to another bucket.
Transfer jobs are templates for transfer-job-runs. Transfer jobs are run based on a schedule.
To run a job once, the schedule start and end dates are set to the same day.
Automatic run creation based on the schedule is one-time only. If it fails or you want to run
the transfer-job twice you will need to manually create a transfer-job-run on the second attempt.
Failure notifications are handled by pubsub. When the transfer service fails it will send a
notification to the specified topic. That topic should be configured to propagate the failure
notice to our HTTP endpoint which will then call the appropriate retry function.
:param gcp_project_id: The GCP project_id. This can be extracted from the storage class
returned by `get_storage` function.
:param source_bucket:
:param source_prefix:
:param destination_bucket:
:param destination_prefix:
:param notification_topic: topic to which we'll notify the success or failure of the transfer.
:param do_create_transfer_job: Injected function which creates the transfer-job.
:param get_current_datetime: Injected function which computes the current datetime.
"""
date_job_starts = get_current_datetime()
# To make this a one-shot job, the start and end dates must be the same.
date_job_ends = date_job_starts
transfer_job = TransferJob(
description=job_description,
project_id=gcp_project_id,
status=storage_transfer_v1.TransferJob.Status.ENABLED,
transfer_spec=TransferSpec(
gcs_data_source=GcsData(bucket_name=source_bucket, path=source_prefix),
gcs_data_sink=GcsData(bucket_name=destination_bucket, path=destination_prefix),
),
schedule=Schedule(
schedule_start_date=date_pb2.Date(
year=date_job_starts.year,
month=date_job_starts.month,
day=date_job_starts.day,
),
schedule_end_date=date_pb2.Date(
year=date_job_ends.year,
month=date_job_ends.month,
day=date_job_ends.day,
),
),
)
if notification_topic:
transfer_job.notification_config = NotificationConfig(
pubsub_topic=f"projects/{gcp_project_id}/topics/{notification_topic}",
event_types=[
NotificationConfig.EventType.TRANSFER_OPERATION_FAILED,
NotificationConfig.EventType.TRANSFER_OPERATION_SUCCESS,
NotificationConfig.EventType.TRANSFER_OPERATION_ABORTED,
],
payload_format=NotificationConfig.PayloadFormat.JSON,
)
if transfer_job_name:
transfer_job.name = transfer_job_name
request = CreateTransferJobRequest(transfer_job=transfer_job)
return do_create_transfer_job(request)
def request_run_transfer_job(request: RunTransferJobRequest) -> None:
    """Submit a ``run_transfer_job`` call to GCP using ambient credentials.

    Default callback for retrying transfer-job runs; inject a different
    callable when custom credentials or behavior are required.
    """
    storage_transfer_v1.StorageTransferServiceClient().run_transfer_job(request)
    return None
def retry_transfer_job_run[T](
event: dict[str, Any],
do_run_transfer_job: Callable[[RunTransferJobRequest], T],
) -> T | None:
"""
Retry a failed transfer job run.
This function expects an event structured in the Google Cloud pubsub notification format.
:param event:
:param do_run_transfer_job: Any callback function which triggers a `run_transfer_job` action
on GCP. You should use `request_run_transfer_job` by default unless you need to manually
specify credentials or have some other divergent behavior.
"""
if "data" not in event:
return None
# Decode the Pub/Sub message payload
message = base64.b64decode(event["data"]).decode("utf-8")
payload = json.loads(message)
# Check for a failed transfer operation
if "transferOperation" in payload and payload["transferOperation"]["status"] == "FAILED":
job_name = payload["transferOperation"]["transferJobName"]
gcp_project_id = payload["transferOperation"]["projectId"]
request = RunTransferJobRequest(job_name=job_name, project_id=gcp_project_id)
return do_run_transfer_job(request)
return None
# $$$$$$$\ $$$$$$$$\ $$$$$$$\ $$\ $$$$$$\ $$\ $$\
# $$ __$$\ $$ _____|$$ __$$\ $$ | $$ __$$\\$$\ $$ |
# $$ | $$ |$$ | $$ | $$ |$$ | $$ / $$ |\$$\ $$ /
# $$$$$$$ |$$$$$\ $$$$$$$ |$$ | $$$$$$$$ | \$$$$ /
# $$ __$$< $$ __| $$ ____/ $$ | $$ __$$ | \$$ /
# $$ | $$ |$$ | $$ | $$ | $$ | $$ | $$ |
# $$ | $$ |$$$$$$$$\ $$ | $$$$$$$$\ $$ | $$ | $$ |
# \__| \__|\________|\__| \________|\__| \__| \__|
def query_replays_dataset(
    project_id: int,
    start: datetime,
    end: datetime,
    limit: int,
    offset: int,
) -> Request:
    """Build a paginated Snuba request selecting every replay row in the range.

    :param project_id: Sentry Project ID whose rows are selected.
    :param start: Inclusive, minimum timestamp in the queried range.
    :param end: Exclusive, maximum timestamp in the queried range.
    :param limit: Maximum number of rows the query may return.
    :param offset: Number of rows to skip before returning results.
    """
    assert start < end, "Start date must be less than the ending date."
    assert project_id > 0, "Project ID must be greater than zero."
    assert limit > 0, "limit must be a positive integer greater than zero."
    assert offset >= 0, "offset must be a positive integer greater than or equal to zero."

    # Every column exported from the replays dataset, in output order.
    selected_columns = (
        "replay_id",
        "debug_id",
        "count_info_events",
        "count_warning_events",
        "count_error_events",
        "info_id",
        "warning_id",
        "error_id",
        "fatal_id",
        "replay_type",
        "error_sample_rate",
        "session_sample_rate",
        "event_hash",
        "segment_id",
        "trace_ids",
        "title",
        "url",
        "urls",
        "is_archived",
        "error_ids",
        "project_id",
        "timestamp",
        "replay_start_timestamp",
        "platform",
        "environment",
        "release",
        "dist",
        "ip_address_v4",
        "ip_address_v6",
        "user",
        "user_id",
        "user_name",
        "user_email",
        "user_geo_city",
        "user_geo_country_code",
        "user_geo_region",
        "user_geo_subdivision",
        "viewed_by_id",
        "os_name",
        "os_version",
        "browser_name",
        "browser_version",
        "device_name",
        "device_brand",
        "device_family",
        "device_model",
        "ota_updates_channel",
        "ota_updates_runtime_version",
        "ota_updates_update_id",
        "sdk_name",
        "sdk_version",
        "tags.key",
        "tags.value",
        "click_node_id",
        "click_tag",
        "click_id",
        "click_class",
        "click_text",
        "click_role",
        "click_alt",
        "click_testid",
        "click_aria_label",
        "click_title",
        "click_component_name",
        "click_is_dead",
        "click_is_rage",
        "count_errors",
        "count_urls",
        "retention_days",
        "partition",
        "offset",
    )

    query = Query(
        match=Entity("replays"),
        select=[Column(name) for name in selected_columns],
        where=[
            Condition(Column("project_id"), Op.EQ, project_id),
            Condition(Column("timestamp"), Op.GTE, start),
            Condition(Column("timestamp"), Op.LT, end),
        ],
        orderby=[
            OrderBy(Column("project_id"), Direction.ASC),
            OrderBy(Function("toStartOfDay", parameters=[Column("timestamp")]), Direction.ASC),
            # Within a day, order by the hash of replay_id and then event_hash.
            OrderBy(Function("cityHash64", parameters=[Column("replay_id")]), Direction.ASC),
            OrderBy(Column("event_hash"), Direction.ASC),
        ],
        limit=Limit(limit),
        offset=Offset(offset),
    )

    return Request(
        dataset="replays",
        app_id="replay-backend-web",
        query=query,
        tenant_ids={},
    )
def get_replay_date_query_ranges(
    project_id: int,
    referrer: str = Referrer.EU_DATA_EXPORT.value,
) -> Generator[tuple[datetime, datetime, int]]:
    """Yield a (start-of-day, next-day, row-count) tuple for each day with replay rows.

    Equivalent SQL:

        SELECT formatDateTime(toStartOfDay(timestamp), '%F'), count()
        FROM replays_dist
        WHERE project_id = 11276
        GROUP BY toStartOfDay(timestamp)
        ORDER BY toStartOfDay(timestamp)
    """
    day_bucket = Function("toStartOfDay", parameters=[Column("timestamp")])

    # Snuba requires a start and end range but we don't know the start and end yet! We
    # specify an arbitrarily large range to accommodate. If you're debugging a failed
    # export in the year 3000 I am very sorry for the inconvenience this has caused you.
    lower_bound = datetime(year=1970, month=1, day=1)
    upper_bound = datetime(year=3000, month=1, day=1)

    request = Request(
        dataset="replays",
        app_id="replay-backend-web",
        query=Query(
            match=Entity("replays"),
            select=[
                Function("formatDateTime", parameters=[day_bucket, "%F"], alias="day"),
                Function("count", parameters=[], alias="max_rows_to_export"),
            ],
            where=[
                Condition(Column("project_id"), Op.EQ, project_id),
                Condition(Column("timestamp"), Op.GTE, lower_bound),
                Condition(Column("timestamp"), Op.LT, upper_bound),
            ],
            orderby=[OrderBy(day_bucket, Direction.ASC)],
            groupby=[day_bucket],
        ),
        tenant_ids={},
    )

    for row in raw_snql_query(request, referrer)["data"]:
        day_start = datetime.fromisoformat(row["day"])
        yield day_start, day_start + timedelta(days=1), row["max_rows_to_export"]
def export_replay_row_set(
    project_id: int,
    start: datetime,
    end: datetime,
    limit: int,
    initial_offset: int,
    write_to_storage: Callable[[str, str], None],
    num_pages: int = EXPORT_QUERY_PAGES_PER_TASK,
) -> int | None:
    """Export up to ``limit * num_pages`` rows starting at ``initial_offset``.

    Any rows found are written as a single CSV blob via ``write_to_storage``.
    Returns the offset where the next batch should resume, or ``None`` when a
    short read indicates the range has been exhausted.
    """

    def make_query(limit: int, offset: int):
        return query_replays_dataset(project_id, start, end, limit, offset)

    rows = list(
        export_clickhouse_rows(
            make_query,
            limit=limit,
            offset=initial_offset,
            num_pages=num_pages,
        )
    )

    if rows:
        filename = f"clickhouse/session-replay/{project_id}/{start.isoformat()}/{end.isoformat()}/{initial_offset}"
        write_to_storage(filename, rows_to_csv(rows))

    # A completely full result set implies more rows may remain; anything
    # short means we reached the end of the range.
    if len(rows) == (limit * num_pages):
        return initial_offset + len(rows)
    return None
def save_to_storage(destination_bucket: str, filename: str, contents: str) -> None:
    """Write ``contents`` to ``filename`` inside the given GCS bucket."""
    backend = get_storage(None)
    assert isinstance(backend, GoogleCloudStorage)
    backend.bucket_name = destination_bucket
    backend.save(filename, io.BytesIO(contents.encode()))
@instrumented_task(
    name="sentry.replays.tasks.export_replay_row_set_async",
    namespace=replays_tasks,
    processing_deadline_duration=15 * 60,
    retry=Retry(times=120, delay=5),
)
def export_replay_row_set_async(
    project_id: int,
    start: datetime,
    end: datetime,
    destination_bucket: str,
    max_rows_to_export: int,
    limit: int = EXPORT_QUERY_ROWS_PER_PAGE,
    offset: int = 0,
    num_pages: int = EXPORT_QUERY_PAGES_PER_TASK,
) -> None:
    """
    Export all replay rows which belong to the project and exist within the range.

    The task re-schedules itself with an advanced offset until the range is exhausted or
    max_rows_to_export has been reached.

    :param project_id: Sentry Project ID.
    :param start: Inclusive, minimum date in the queried range.
    :param end: Exclusive, maximum date in the queried range.
    :param destination_bucket: Bucket the resulting CSV files are uploaded to.
    :param max_rows_to_export: The maximum number of rows which may be exported by this task
        chain. The max_rows_to_export value should match the number of rows present in your
        range. This value is specified to protect against malformed behavior in the code
        which might produce infinite (or at least very long) task recursion.
    :param limit: The maximum number of rows to query by for a given page.
    :param offset: The offset within the query range to query for. Must constantly increment
        and never overlap with previous runs.
    :param num_pages: The maximum number of pages to query per task.
    """
    assert limit > 0, "Limit must be greater than 0."
    assert offset >= 0, "Offset must be greater than or equal to 0."
    assert start < end, "Start must be before end date."
    assert num_pages > 0, "num_pages must be greater than 0."

    next_offset = export_replay_row_set(
        project_id,
        start,
        end,
        limit,
        offset,
        lambda filename, contents: save_to_storage(destination_bucket, filename, contents),
        num_pages,
    )

    # Tasks can run for a defined length of time. The export can take an unbounded length of time
    # to complete. For this reason we cap the amount of work we'll perform within a single task's
    # lifetime and schedule the remainder of the work to take place on another task.
    #
    # The call chain is explicitly terminated by a pre-computed max_rows_to_export value. If this
    # value is exceeded the chain exits immediately even if more rows could have been found. It's
    # unlikely there will be more rows because in order to export your data you need to terminate
    # your Sentry account. Under those conditions you're no longer a Sentry customer and should
    # not be ingesting any data into Sentry.
    if next_offset and next_offset < max_rows_to_export:
        # We assert the call chain is making forward progress.
        assert next_offset > offset, "next_offset was not greater than previous offset."
        # We assert the call chain is making meaningful progress. We should not overlap.
        assert next_offset == (offset + (limit * num_pages)), "next_offset overlapped previous run."

        export_replay_row_set_async.delay(
            project_id=project_id,
            start=start,
            end=end,
            limit=limit,
            offset=next_offset,
            destination_bucket=destination_bucket,
            max_rows_to_export=max_rows_to_export,
            num_pages=num_pages,
        )
@instrumented_task(
    name="sentry.replays.tasks.export_replay_project_async",
    namespace=replays_tasks,
)
def export_replay_project_async(
    project_id: int,
    limit: int,
    destination_bucket: str,
    num_pages: int = EXPORT_QUERY_PAGES_PER_TASK,
):
    """
    Export every replay for a given Sentry Project ID.

    One task is spawned per populated day bucket and exports that day's rows. That caps
    parallelism at roughly 90 simultaneous processes (less under task-broker pressure). For
    more parallelism, tweak the granularity of the `get_replay_date_query_ranges` query.

    :param project_id: Sentry Project ID.
    :param limit: The maximum number of rows to query for in any given replay.
    :param destination_bucket: Bucket the exported CSV files are written to.
    :param num_pages: The maximum number of pages to query for within a single task execution.
    """
    # Schedule one export task per day that actually contains replay rows.
    for range_start, range_end, row_count in get_replay_date_query_ranges(project_id):
        export_replay_row_set_async.delay(
            project_id=project_id,
            start=range_start,
            end=range_end,
            destination_bucket=destination_bucket,
            max_rows_to_export=row_count,
            limit=limit,
            offset=0,
            num_pages=num_pages,
        )
def export_replay_blob_data[T](
project_id: int,
gcp_project_id: str,
destination_bucket: str,
destination_prefix: str,
do_create_transfer_job: Callable[[CreateTransferJobRequest], T],
pubsub_topic_name: str | None = None,
source_bucket: str = EXPORT_JOB_SOURCE_BUCKET,
) -> list[T]:
# In the future we could set a non-unique transfer-job name. This would prevent duplicate runs
# from doing the same work over and over again. However, we'd need to catch the exception,
# look-up any active runs, and, if no active runs, schedule a new run. This is a bit much for
# now.
#
# transfer_job_name = f"{source_bucket}/{project_id}/{start_date_rounded_to_day}"
jobs = []
for retention_days in (30, 60, 90):
jobs.append(
create_transfer_job(
gcp_project_id=gcp_project_id,
transfer_job_name=None,
source_bucket=source_bucket,
source_prefix=f"{retention_days}/{project_id}/",
destination_bucket=destination_bucket,
destination_prefix=destination_prefix,
notification_topic=pubsub_topic_name,
job_description="Session Replay EU Compliance Export",
do_create_transfer_job=do_create_transfer_job,
)
)
return jobs
def export_replay_data(
    organization_id: int,
    gcp_project_id: str,
    destination_bucket: str,
    destination_prefix: str,
    database_rows_per_page: int = EXPORT_QUERY_ROWS_PER_PAGE,
    database_pages_per_task: int = EXPORT_QUERY_PAGES_PER_TASK,
    source_bucket: str = EXPORT_JOB_SOURCE_BUCKET,
    pubsub_topic_name: str | None = None,
):
    """Kick off a full replay export (blob data plus database rows) for an organization."""
    logger.info(
        "Starting replay export...",
        extra={
            "organization_id": organization_id,
            "gcp_project_id": gcp_project_id,
            "destination_bucket": destination_bucket,
            "database_rows_per_page": database_rows_per_page,
            "database_pages_per_task": database_pages_per_task,
            "source_bucket": source_bucket,
            "pubsub_topic_name": pubsub_topic_name,
        },
    )

    try:
        organization = Organization.objects.get(id=organization_id)
        logger.info("Found organization", extra={"organization.slug": organization.slug})
    except Organization.DoesNotExist:
        logger.exception("Could not find organization", extra={"organization.id": organization_id})
        return None

    # Only projects that have the has_replays flag set participate in the export.
    projects = list(
        Project.objects.filter(
            organization_id=organization_id, flags=F("flags").bitor(Project.flags.has_replays)
        )
    )
    if not projects:
        logger.info("No projects with replays found.")
        return None

    logger.info("Found projects with replays.", extra={"number_of_projects": len(projects)})

    # First pass: schedule recording (blob) transfer jobs for every project.
    for project in projects:
        logger.info(
            "Starting recording export job for project", extra={"project_slug": project.slug}
        )
        export_replay_blob_data(
            project_id=project.id,
            gcp_project_id=gcp_project_id,
            destination_bucket=destination_bucket,
            destination_prefix=destination_prefix,
            pubsub_topic_name=pubsub_topic_name,
            source_bucket=source_bucket,
            do_create_transfer_job=request_create_transfer_job,
        )
        logger.info("Successfully scheduled recording export job.")

    # Second pass: schedule database row exports for every project.
    for project in projects:
        logger.info(
            "Starting database export job for project", extra={"project_slug": project.slug}
        )
        export_replay_project_async.delay(
            project_id=project.id,
            limit=database_rows_per_page,
            destination_bucket=destination_bucket,
            num_pages=database_pages_per_task,
        )
        logger.info("Successfully scheduled database export job.")

    # Really need a way to signal an export has finished or failed. Probably a screen in the
    # application exposed to the customer or admins. This will require database models, front-end
    # engineers, API blueprints, a concept of a work group...
    logger.info("Export finished! It will run in the background. No further action is required.")
| QueryFnProtocol |
python | Delgan__loguru | tests/test_add_option_enqueue.py | {
"start": 143,
"end": 308
} | class ____:
def __getstate__(self):
raise pickle.PicklingError("You shall not serialize me!")
def __setstate__(self, state):
pass
| NotPicklable |
python | bottlepy__bottle | bottle.py | {
"start": 22417,
"end": 44208
} | class ____:
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
@lazy_attribute
def _global_config(cls):
cfg = ConfigDict()
cfg.meta_set('catchall', 'validate', bool)
return cfg
def __init__(self, **kwargs):
#: A :class:`ConfigDict` for app specific configuration.
self.config = self._global_config._make_overlay()
self.config._add_change_listener(
functools.partial(self.trigger_hook, 'config'))
self.config.update({
"catchall": True
})
if kwargs.get('catchall') is False:
depr(0, 13, "Bottle(catchall) keyword argument.",
"The 'catchall' setting is now part of the app "
"configuration. Fix: `app.config['catchall'] = False`")
self.config['catchall'] = False
if kwargs.get('autojson') is False:
depr(0, 13, "Bottle(autojson) keyword argument.",
"The 'autojson' setting is now part of the app "
"configuration. Fix: `app.config['json.enable'] = False`")
self.config['json.enable'] = False
self._mounts = []
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = {'after_request'}
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
""" Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
"""
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
""" Remove a callback from a hook. """
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
""" Trigger a hook and return a list of results. """
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
def _mount_wsgi(self, prefix, app, **options):
segments = [p for p in prefix.split('/') if p]
if not segments:
raise ValueError('WSGI applications cannot be mounted to "/".')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
_raise(*exc_info)
status = _wsgi_recode(status)
headerlist = [(k, _wsgi_recode(v)) for (k, v) in headerlist]
rs.status = status
for name, value in headerlist:
rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
rs.body = itertools.chain(rs.body, body) if rs.body else body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def _mount_app(self, prefix, app, **options):
if app in self._mounts or '_mount.app' in app.config:
depr(0, 13, "Application mounted multiple times. Falling back to WSGI mount.",
"Clone application before mounting to a different location.")
return self._mount_wsgi(prefix, app, **options)
if options:
depr(0, 13, "Unsupported mount options. Falling back to WSGI mount.",
"Do not specify any route options when mounting bottle application.")
return self._mount_wsgi(prefix, app, **options)
if not prefix.endswith("/"):
depr(0, 13, "Prefix must end in '/'. Falling back to WSGI mount.",
"Consider adding an explicit redirect from '/prefix' to '/prefix/' in the parent application.")
return self._mount_wsgi(prefix, app, **options)
self._mounts.append(app)
app.config['_mount.prefix'] = prefix
app.config['_mount.app'] = self
for route in app.routes:
route.rule = prefix + route.rule.lstrip('/')
self.add_route(route)
def mount(self, prefix, app, **options):
""" Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
parent_app.mount('/prefix/', child_app)
:param prefix: path prefix or `mount-point`.
:param app: an instance of :class:`Bottle` or a WSGI application.
Plugins from the parent application are not applied to the routes
of the mounted child application. If you need plugins in the child
application, install them separately.
While it is possible to use path wildcards within the prefix path
(:class:`Bottle` childs only), it is highly discouraged.
The prefix path must end with a slash. If you want to access the
root of the child application via `/prefix` in addition to
`/prefix/`, consider adding a route with a 307 redirect to the
parent application.
"""
if not prefix.startswith('/'):
raise ValueError("Prefix must start with '/'")
if isinstance(app, Bottle):
return self._mount_app(prefix, app, **options)
else:
return self._mount_wsgi(prefix, app, **options)
def merge(self, routes):
""" Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. """
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
""" Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
"""
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
""" Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. """
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
""" Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. """
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes:
route.reset()
if DEBUG:
for route in routes:
route.prepare()
self.trigger_hook('app_reset')
def close(self):
""" Close the application and all installed plugins. """
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
def run(self, **kwargs):
""" Calls :func:`run` with the same parameters. """
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route`, urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
""" Add a route object, but do not change the :data:`Route.app`
attribute."""
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self,
path=None,
method='GET',
callback=None,
name=None,
apply=None,
skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/<name>')
def hello(name):
return 'Hello %s' % name
The ``<name>`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
if isinstance(callback, str):
callback = load(callback) # type: Callable
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback,
name=name,
plugins=plugins,
skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def patch(self, path=None, method='PATCH', **options):
""" Equals :meth:`route` with a ``PATCH`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500, callback=None):
""" Register an output handler for a HTTP error code. Can
be used as a decorator or called directly ::
def error_handler_500(error):
return 'error_handler_500'
app.error(code=500, callback=error_handler_500)
@app.error(404)
def error_handler_404(error):
return 'error_handler_404'
"""
def decorator(callback):
if isinstance(callback, str): callback = load(callback)
self.error_handler[int(code)] = callback
return callback
return decorator(callback) if callback else decorator
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res, template_settings=dict(name='__ERROR_PAGE_TEMPLATE')))
def _handle(self, environ):
path = environ['bottle.raw_path'] = environ['PATH_INFO']
environ['PATH_INFO'] = _wsgi_recode(path)
environ['bottle.app'] = self
request.bind(environ)
response.bind()
out = None
try:
try:
self.trigger_hook('before_request')
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
out = route.call(**args)
except HTTPResponse as E:
out = E
finally:
if isinstance(out, HTTPResponse):
out.apply(response)
try:
self.trigger_hook('after_request')
except HTTPResponse as E:
out = E
out.apply(response)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as E:
_try_close(out)
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
environ['wsgi.errors'].flush()
environ['bottle.exc_info'] = sys.exc_info()
out = HTTPError(500, "Internal Server Error", E, stacktrace)
out.apply(response)
return out
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, bytes/bytearray, str, dict, HTTPResponse, HTTPError,
file-like, iterable of bytes/bytearray or str instances.
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list)) and isinstance(out[0], (bytes, str)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, str):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code,
self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
_try_close(out)
return self._cast('')
except HTTPResponse as E:
first = E
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as error:
_try_close(out)
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', error, format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, str):
encoder = lambda x: x.encode(response.charset)
new_iter = map(encoder, itertools.chain([first], iout))
else:
_try_close(out)
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
out = None
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304) \
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
exc_info = environ.get('bottle.exc_info')
if exc_info is not None:
del environ['bottle.exc_info']
start_response(response._wsgi_status_line(), response.headerlist, exc_info)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception as E:
_try_close(out)
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(E)), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
environ['wsgi.errors'].flush()
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
""" Each instance of :class:'Bottle' is a WSGI application. """
return self.wsgi(environ, start_response)
def __enter__(self):
""" Use this application as default for all module-level shortcuts. """
default_app.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
default_app.pop()
def __setattr__(self, name, value):
if name in self.__dict__:
raise AttributeError("Attribute %s already defined. Plugin conflict?" % name)
object.__setattr__(self, name, value)
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
| Bottle |
python | openai__openai-python | src/openai/_resource.py | {
"start": 663,
"end": 1100
} | class ____:
_client: AsyncOpenAI
def __init__(self, client: AsyncOpenAI) -> None:
self._client = client
self._get = client.get
self._post = client.post
self._patch = client.patch
self._put = client.put
self._delete = client.delete
self._get_api_list = client.get_api_list
async def _sleep(self, seconds: float) -> None:
await anyio.sleep(seconds)
| AsyncAPIResource |
python | numba__numba | numba/tests/doc_examples/test_jitclass.py | {
"start": 207,
"end": 3057
} | class ____(TestCase):
def test_ex_jitclass(self):
# magictoken.ex_jitclass.begin
import numpy as np
from numba import int32, float32 # import the types
from numba.experimental import jitclass
spec = [
('value', int32), # a simple scalar field
('array', float32[:]), # an array field
]
@jitclass(spec)
class Bag(object):
def __init__(self, value):
self.value = value
self.array = np.zeros(value, dtype=np.float32)
@property
def size(self):
return self.array.size
def increment(self, val):
for i in range(self.size):
self.array[i] += val
return self.array
@staticmethod
def add(x, y):
return x + y
n = 21
mybag = Bag(n)
# magictoken.ex_jitclass.end
self.assertTrue(isinstance(mybag, Bag))
self.assertPreciseEqual(mybag.value, n)
np.testing.assert_allclose(mybag.array, np.zeros(n, dtype=np.float32))
self.assertPreciseEqual(mybag.size, n)
np.testing.assert_allclose(mybag.increment(3),
3 * np.ones(n, dtype=np.float32))
np.testing.assert_allclose(mybag.increment(6),
9 * np.ones(n, dtype=np.float32))
self.assertPreciseEqual(mybag.add(1, 1), 2)
self.assertPreciseEqual(Bag.add(1, 2), 3)
def test_ex_jitclass_type_hints(self):
# magictoken.ex_jitclass_type_hints.begin
from typing import List
from numba.experimental import jitclass
from numba.typed import List as NumbaList
@jitclass
class Counter:
value: int
def __init__(self):
self.value = 0
def get(self) -> int:
ret = self.value
self.value += 1
return ret
@jitclass
class ListLoopIterator:
counter: Counter
items: List[float]
def __init__(self, items: List[float]):
self.items = items
self.counter = Counter()
def get(self) -> float:
idx = self.counter.get() % len(self.items)
return self.items[idx]
items = NumbaList([3.14, 2.718, 0.123, -4.])
loop_itr = ListLoopIterator(items)
# magictoken.ex_jitclass_type_hints.end
for idx in range(10):
self.assertEqual(loop_itr.counter.value, idx)
self.assertAlmostEqual(loop_itr.get(), items[idx % len(items)])
self.assertEqual(loop_itr.counter.value, idx + 1)
if __name__ == '__main__':
unittest.main()
| DocsJitclassUsageTest |
python | huggingface__transformers | tests/models/encodec/test_modeling_encodec.py | {
"start": 63312,
"end": 70338
} | class ____(unittest.TestCase):
@parameterized.expand(
[
(f"{os.path.basename(model_id)}_{bandwidth.replace('.', 'p')}", model_id, bandwidth)
for model_id, v in EXPECTED_ENCODER_CODES.items()
for bandwidth in v
]
)
def test_integration(self, name, model_id, bandwidth):
# load model
model = EncodecModel.from_pretrained(model_id).to(torch_device)
processor = AutoProcessor.from_pretrained(model_id)
# load audio
librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate))
audio_array = librispeech_dummy[0]["audio"]["array"]
if model.config.audio_channels > 1:
audio_array = np.array([audio_array] * model.config.audio_channels)
inputs = processor(
raw_audio=audio_array,
sampling_rate=processor.sampling_rate,
return_tensors="pt",
padding=True,
).to(torch_device)
model = model.eval()
with torch.no_grad():
# Compare encoder outputs with expected values
encoded_frames = model.encode(inputs["input_values"], inputs["padding_mask"], bandwidth=float(bandwidth))
codes = torch.cat([encoded[0] for encoded in encoded_frames["audio_codes"]], dim=-1).unsqueeze(0)
torch.testing.assert_close(
codes[..., : EXPECTED_ENCODER_CODES[model_id][bandwidth].shape[-1]],
EXPECTED_ENCODER_CODES[model_id][bandwidth].to(torch_device),
rtol=1e-4,
atol=1e-4,
)
if EXPECTED_ENCODER_SCALES[model_id][bandwidth] is not None:
scales = torch.tensor([encoded[0].squeeze() for encoded in encoded_frames["audio_scales"]])
torch.testing.assert_close(scales, EXPECTED_ENCODER_SCALES[model_id][bandwidth], rtol=1e-4, atol=1e-4)
# Compare decoder outputs with expected values
decoded_frames = model.decode(
encoded_frames["audio_codes"],
encoded_frames["audio_scales"],
inputs["padding_mask"],
last_frame_pad_length=encoded_frames["last_frame_pad_length"],
)
torch.testing.assert_close(
decoded_frames["audio_values"][0][..., : EXPECTED_DECODER_OUTPUTS[model_id][bandwidth].shape[-1]],
EXPECTED_DECODER_OUTPUTS[model_id][bandwidth].to(torch_device),
rtol=1e-4,
atol=1e-4,
)
# Compare codec error with expected values
codec_error = compute_rmse(decoded_frames["audio_values"], inputs["input_values"])
torch.testing.assert_close(codec_error, EXPECTED_CODEC_ERROR[model_id][bandwidth], rtol=1e-4, atol=1e-4)
# make sure forward and enc-dec give same result
full_enc = model(inputs["input_values"], inputs["padding_mask"], bandwidth=float(bandwidth))
torch.testing.assert_close(
full_enc["audio_values"],
decoded_frames["audio_values"],
rtol=1e-4,
atol=1e-4,
)
@parameterized.expand(
[
(f"{os.path.basename(model_id)}_{bandwidth.replace('.', 'p')}", model_id, bandwidth)
for model_id, v in EXPECTED_ENCODER_CODES_BATCH.items()
for bandwidth in v
]
)
def test_batch(self, name, model_id, bandwidth):
# load model
model = EncodecModel.from_pretrained(model_id).to(torch_device)
processor = AutoProcessor.from_pretrained(model_id)
# load audio
librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate))
n_channels = model.config.audio_channels
if n_channels == 1:
audio_samples = [audio_sample["array"] for audio_sample in librispeech_dummy[-2:]["audio"]]
else:
audio_samples = []
for _sample in librispeech_dummy[-2:]["audio"]:
# concatenate mono channels to target number of channels
audio_array = np.concatenate([_sample["array"][np.newaxis]] * n_channels, axis=0)
audio_samples.append(audio_array)
inputs = processor(
raw_audio=audio_samples,
sampling_rate=processor.sampling_rate,
return_tensors="pt",
padding=True,
).to(torch_device)
# apply model
model = model.eval()
with torch.no_grad():
# Compare encoder outputs with expected values
encoded_frames = model.encode(inputs["input_values"], inputs["padding_mask"], bandwidth=float(bandwidth))
codes = encoded_frames["audio_codes"].permute(1, 2, 0, 3)
codes = codes.reshape(codes.size(0), codes.size(1), -1)
torch.testing.assert_close(
codes[..., : EXPECTED_ENCODER_CODES_BATCH[model_id][bandwidth].shape[-1]],
EXPECTED_ENCODER_CODES_BATCH[model_id][bandwidth].to(torch_device),
rtol=1e-4,
atol=1e-4,
)
if EXPECTED_ENCODER_SCALES_BATCH[model_id][bandwidth] is not None:
scales = torch.stack(encoded_frames["audio_scales"])
torch.testing.assert_close(
scales, EXPECTED_ENCODER_SCALES_BATCH[model_id][bandwidth].to(torch_device), rtol=1e-4, atol=1e-4
)
# Compare decoder outputs with expected values
decoded_frames = model.decode(
encoded_frames["audio_codes"],
encoded_frames["audio_scales"],
inputs["padding_mask"],
last_frame_pad_length=encoded_frames["last_frame_pad_length"],
)
torch.testing.assert_close(
decoded_frames["audio_values"][..., : EXPECTED_DECODER_OUTPUTS_BATCH[model_id][bandwidth].shape[-1]],
EXPECTED_DECODER_OUTPUTS_BATCH[model_id][bandwidth].to(torch_device),
rtol=1e-4,
atol=1e-4,
)
# Compare codec error with expected values
codec_error = compute_rmse(decoded_frames["audio_values"], inputs["input_values"])
torch.testing.assert_close(
codec_error, EXPECTED_CODEC_ERROR_BATCH[model_id][bandwidth], rtol=1e-4, atol=1e-4
)
# make sure forward and enc-dec give same result
input_values_dec = model(inputs["input_values"], inputs["padding_mask"], bandwidth=float(bandwidth))
torch.testing.assert_close(
input_values_dec["audio_values"], decoded_frames["audio_values"], rtol=1e-4, atol=1e-4
)
| EncodecIntegrationTest |
python | tensorflow__tensorflow | tensorflow/python/tpu/tpu_sharding.py | {
"start": 930,
"end": 11244
} | class ____(object):
"""An object use to hold the sharding policy for a Tensor."""
def __init__(self):
self._number_of_shards = None
self._number_of_partitions = 1
self._shard_dimension = None
self._frozen = False
def __str__(self):
if self.number_of_shards is None or self.shard_dimension is None:
return "ShardingPolicy(unset)"
else:
return ("ShardingPolicy(%d shards dimension %d)" %
(self.number_of_shards, self.shard_dimension))
def _fill_default_values(self):
if self._number_of_shards is None:
self._number_of_shards = _DEFAULT_NUMBER_OF_SHARDS
if self._shard_dimension is None:
self._shard_dimension = tensor_shape.as_dimension(
_DEFAULT_SHARD_DIMENSION)
def freeze(self):
"""Prevents further modification to the sharding policy.
Any values that have not been set when freeze is called are set to
defaults. If the ShardingPolicy is already frozen, this is a NoOp.
"""
if not self._frozen:
self._fill_default_values()
self._frozen = True
@property
def number_of_shards(self):
"""Returns the number of shards in the policy or None if unspecified."""
return self._number_of_shards
def set_number_of_shards(self, number_of_shards):
"""Sets the number of shards for the current policy.
If the policy has been frozen then number_of_shards must match the
existing setting.
Args:
number_of_shards: The number of shards to use in the policy.
Raises:
ValueError: If the policy has been frozen and number_of_shards
differs from the frozen value; or number_of_shards <= 0.
"""
if self._frozen:
if self._number_of_shards != number_of_shards:
raise ValueError(
f"Can't set sharding policy to use {number_of_shards} shards since "
f"it has been frozen to use {self._number_of_shards}")
else:
if number_of_shards > 0:
self._number_of_shards = number_of_shards
else:
raise ValueError(
f"Can't set sharding policy to use {number_of_shards} shards; "
"value must be > 0")
@property
def number_of_partitions(self):
"""Returns the number of partitions of the policy or None if unspecified."""
return self._number_of_partitions
def set_number_of_partitions(self, number_of_partitions):
"""Sets the number of partitions for the current policy.
If the policy has been frozen then shard_dimension must match the
existing setting.
Args:
number_of_partitions: The number of partitions to use in the policy.
Raises:
ValueError: If the policy has been frozen and shard_dimension
differs from the frozen value.
"""
if self._frozen:
if self._number_of_partitions != number_of_partitions:
raise ValueError(
f"Can't set number_of_partitions to {number_of_partitions} since "
f"it has been frozen to use {self._number_of_partitions}.")
else:
self._number_of_partitions = number_of_partitions
@property
def shard_dimension(self):
"""Returns the shard dimension of the policy or None if unspecified."""
return self._shard_dimension
def set_shard_dimension(self, shard_dimension):
"""Sets the shard dimension for the current policy.
If the policy has been frozen then shard_dimension must match the
existing setting.
Args:
shard_dimension: The shard dimension to use in the policy.
Raises:
ValueError: If the policy has been frozen and shard_dimension
differs from the frozen value, or shard_dimension can't be
interpreted as a Dimension.
"""
if self._frozen:
if self._shard_dimension != shard_dimension:
raise ValueError(
"Can't set shard dimension to %d since it has been frozen to "
"use %d." % (shard_dimension, self._shard_dimension))
else:
self._shard_dimension = tensor_shape.as_dimension(shard_dimension)
def merge(self, other):
"""Merges the policy of another policy into the current policy.
Args:
other: The policy to merge into this one.
Raises:
ValueError: If this policy has been frozen and the merge conflicts with
the frozen policy.
"""
if other.number_of_shards is not None:
self.set_number_of_shards(other.number_of_shards)
if other.shard_dimension is not None:
self.set_shard_dimension(other.shard_dimension)
def get_unpartitioned_shape(self, shape):
"""Returns the shape of an unpartitioned Tensor.
When given the shape of a 'sharded-size' Tensor, returns the shape
of the full shape of its unpartitioned Tensor.
Args:
shape: The shape of the sharded Tensor.
Returns:
The shape of the unpartitioned version of the Tensor.
Raises:
ValueError: if shape has unknown sharded dimension
"""
shape = tensor_shape.as_shape(shape)
dims = shape.as_list()
if (self._shard_dimension is None or self._number_of_partitions is None or
not dims):
return None
if dims[self._shard_dimension] is None:
raise ValueError(f"Shape {shape.as_list()} must have a fixed size for "
f"dimension {self._shard_dimension} that is known. ")
if self._number_of_partitions > 1:
dims[self._shard_dimension] *= self._number_of_partitions
return tensor_shape.as_shape(dims)
def get_sharded_shape(self, shape, shard_index=None):
"""Returns the shape of a shard of a full Tensor.
When given the shape of a 'full-size' Tensor, returns the shape of
the sub-Tensor after it has been sharded. Freezes the policy if it
has not yet been frozen.
Args:
shape: The shape of the full-size Tensor to be sharded.
shard_index: The index of the shard whose shape should be returned.
shard_index can be None for sharding policies that use the same shape
for every shard.
Returns:
The shape of the sharded version of the Tensor.
Raises:
ValueError: If shard_index is None when shards are of different
shapes; or shard_index is not None and
!(0<=shard_index<number_of_shards); or shape does not have at
least self.shard_dimension+1 dimensions; or the value of
shape's shard dimension is not a multiple of
self.number_of_shards
"""
if self._shard_dimension is None or self._number_of_shards is None:
# Don't raise an error if the config is unset.
return None
if shard_index is not None:
if shard_index < 0 or shard_index >= self.number_of_shards:
raise ValueError(
f"Requested shard_index {shard_index}, but shard_index must be in "
f"[0,{self._number_of_shards}).")
shape = tensor_shape.as_shape(shape)
if self._number_of_shards == 1:
# Don't do anything when there's only one shard.
return shape
ndims = shape.ndims
if ndims is None:
raise ValueError(f"Shape {shape} must be a known shape.")
if ndims <= self._shard_dimension:
raise ValueError(
f"Shape {shape.as_list()} does not contain shard_dimension "
f"{self._shard_dimension}")
dims = shape.as_list()
if dims[self._shard_dimension] is None:
raise ValueError(
f"Shape {shape.as_list()} must have a fixed size for dimension "
f"{self._shard_dimension} that is known at construction time.")
if (dims[self._shard_dimension] % self._number_of_shards) != 0:
raise ValueError(
f"Shape {shape.as_list()} cannot be sharded {self._number_of_shards} "
f"ways along dimension {self._shard_dimension}")
dims[self._shard_dimension] //= self._number_of_shards
return tensor_shape.TensorShape(dims)
def _unshard_shape(self, shape):
"""Return the unsharded shape that would generate a given sharded shape.
Args:
shape: the sharded shape to unshard
Returns:
The unsharded shape.
Raises:
ValueError: if shape is unknown or does not contain
self.shard_dimension
TypeError: if shape is not convertible to a TensorShape
"""
shape = tensor_shape.as_shape(shape)
if self._number_of_shards == 1:
# Don't do anything when there's only one shard.
return shape
ndims = shape.ndims
if ndims is None:
raise ValueError(f"Shape {shape} must be statically known.")
if ndims <= self._shard_dimension:
raise ValueError(f"Shape {shape.as_list()} does not contain "
f"shard_dimension {self._shard_dimension}. "
f"Rank is too small.")
dims = shape.as_list()
dims[self._shard_dimension] *= self._number_of_shards
return tensor_shape.TensorShape(dims)
def get_unsharded_shape(self, shapes):
"""Returns the shape of an unsharded Tensor given a list of shards.
When given a list of shapes of shards, returns the shape of the
unsharded Tensor that would generate the shards. Sets defaults for the
policy if number_of_shards or shard_dimension is None.
Args:
shapes: The shapes of the Tensor shards to be combined.
Returns:
The shape of the unsharded version of the Tensor.
Raises:
ValueError: if shapes is not a list of length
self.number_of_shards; or any element of shapes is not a valid
shape consistent with the sharding policy; or the list of
shapes is not a valid sharding of a full shape.
TypeError: if an element of shapes is not convertible to a
TensorShape
"""
self._fill_default_values()
if len(shapes) != self.number_of_shards:
raise ValueError(
f"Shapes {shapes} is length {len(shapes)} but must be a list of "
f"length number_of_shards={self.number_of_shards}")
unsharded_shapes = [self._unshard_shape(s) for s in shapes]
for i in range(self.number_of_shards - 1):
if not unsharded_shapes[i].is_compatible_with(
unsharded_shapes[self.number_of_shards - 1]):
raise ValueError(
f"Sharded shapes {shapes} are not consistent shards of a full shape "
f"sharded {self.number_of_shards} ways along "
f"dimension {self.shard_dimension}.")
return unsharded_shapes[0]
| ShardingPolicy |
python | xlwings__xlwings | xlwings/main.py | {
"start": 121073,
"end": 125466
} | class ____:
"""
The picture object is a member of the :meth:`pictures <xlwings.main.Pictures>`
collection:
>>> import xlwings as xw
>>> sht = xw.books['Book1'].sheets[0]
>>> sht.pictures[0] # or sht.charts['PictureName']
<Picture 'Picture 1' in <Sheet [Book1]Sheet1>>
.. versionchanged:: 0.9.0
"""
def __init__(self, impl=None):
self.impl = impl
@property
def api(self):
"""
Returns the native object (``pywin32`` or ``appscript`` obj) of the engine
being used.
.. versionadded:: 0.9.0
"""
return self.impl.api
@property
def parent(self):
"""
Returns the parent of the picture.
.. versionadded:: 0.9.0
"""
return Sheet(impl=self.impl.parent)
@property
def name(self):
"""
Returns or sets the name of the picture.
.. versionadded:: 0.5.0
"""
return self.impl.name
@name.setter
def name(self, value):
if value in self.parent.pictures:
if value == self.name:
return
else:
raise ShapeAlreadyExists(
f"'{value}' is already present on {self.parent.name}."
)
self.impl.name = value
@property
def left(self):
"""
Returns or sets the number of points that represent the horizontal position
of the picture.
.. versionadded:: 0.5.0
"""
return self.impl.left
@left.setter
def left(self, value):
self.impl.left = value
@property
def top(self):
"""
Returns or sets the number of points that represent the vertical position
of the picture.
.. versionadded:: 0.5.0
"""
return self.impl.top
@top.setter
def top(self, value):
self.impl.top = value
@property
def width(self):
"""
Returns or sets the number of points that represent the width of the picture.
.. versionadded:: 0.5.0
"""
return self.impl.width
@width.setter
def width(self, value):
self.impl.width = value
@property
def height(self):
"""
Returns or sets the number of points that represent the height of the picture.
.. versionadded:: 0.5.0
"""
return self.impl.height
@height.setter
def height(self, value):
self.impl.height = value
def delete(self):
"""
Deletes the picture.
.. versionadded:: 0.5.0
"""
self.impl.delete()
def __eq__(self, other):
return (
isinstance(other, Picture)
and other.parent == self.parent
and other.name == self.name
)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "<Picture '{0}' in {1}>".format(self.name, self.parent)
def update(self, image, format=None, export_options=None):
"""
Replaces an existing picture with a new one, taking over the attributes of the
existing picture.
Arguments
---------
image : str or path-like object or matplotlib.figure.Figure
Either a filepath or a Matplotlib figure object.
format : str, default None
See under ``Pictures.add()``
export_options : dict, default None
See under ``Pictures.add()``
.. versionadded:: 0.5.0
"""
filename, is_temp_file = utils.process_image(
image,
format="png" if not format else format,
export_options=export_options,
)
picture = Picture(impl=self.impl.update(filename))
# Cleanup temp file
if is_temp_file:
try:
os.unlink(filename)
except: # noqa: E722
pass
return picture
@property
def lock_aspect_ratio(self):
"""
``True`` will keep the original proportion,
``False`` will allow you to change height and width independently of each other
(read/write).
.. versionadded:: 0.24.0
"""
return self.impl.lock_aspect_ratio
@lock_aspect_ratio.setter
def lock_aspect_ratio(self, value):
self.impl.lock_aspect_ratio = value
| Picture |
python | kamyu104__LeetCode-Solutions | Python/maximum-profitable-triplets-with-increasing-prices-i.py | {
"start": 4261,
"end": 6724
} | class ____(object):
def maxProfit(self, prices, profits):
"""
:type prices: List[int]
:type profits: List[int]
:rtype: int
"""
NEG_INF = float("-inf")
# Range Maximum Query
class SegmentTree(object):
def __init__(self, N,
build_fn=lambda _: None,
query_fn=lambda x, y: max(x, y),
update_fn=lambda x, y: max(x, y)):
self.tree = [None]*(2*2**((N-1).bit_length()))
self.base = len(self.tree)//2
self.query_fn = query_fn
self.update_fn = update_fn
for i in xrange(self.base, self.base+N):
self.tree[i] = build_fn(i-self.base)
for i in reversed(xrange(1, self.base)):
self.tree[i] = query_fn(self.tree[2*i], self.tree[2*i+1])
def update(self, i, h):
x = self.base+i
self.tree[x] = self.update_fn(self.tree[x], h)
while x > 1:
x //= 2
self.tree[x] = self.query_fn(self.tree[x*2], self.tree[x*2+1])
def query(self, L, R):
if L > R:
return None
L += self.base
R += self.base
left = right = None
while L <= R:
if L & 1:
left = self.query_fn(left, self.tree[L])
L += 1
if R & 1 == 0:
right = self.query_fn(self.tree[R], right)
R -= 1
L //= 2
R //= 2
return self.query_fn(left, right)
price_to_idx = {x:i for i, x in enumerate(sorted(set(prices)))}
result = NEG_INF
st1, st2 = SegmentTree(len(price_to_idx)), SegmentTree(len(price_to_idx))
for price, profit in itertools.izip(prices, profits):
mx2 = st2.query(0, price_to_idx[price]-1)
if mx2 is not None:
result = max(result, mx2+profit)
st1.update(price_to_idx[price], profit)
mx1 = st1.query(0, price_to_idx[price]-1)
if mx1 is not None:
st2.update(price_to_idx[price], mx1+profit)
return result if result != NEG_INF else -1
# Time: O(nlogn)
# Space: O(n)
# prefix sum, segment tree
| Solution4 |
python | django__django | tests/gis_tests/geoapp/test_feeds.py | {
"start": 329,
"end": 4448
} | class ____(TestCase):
fixtures = ["initial"]
@classmethod
def setUpTestData(cls):
Site(id=settings.SITE_ID, domain="example.com", name="example.com").save()
def assertChildNodes(self, elem, expected):
"Taken from syndication/tests.py."
actual = {n.nodeName for n in elem.childNodes}
expected = set(expected)
self.assertEqual(actual, expected)
def test_geofeed_rss(self):
"Tests geographic feeds using GeoRSS over RSSv2."
# Uses `GEOSGeometry` in `item_geometry`
doc1 = minidom.parseString(self.client.get("/feeds/rss1/").content)
# Uses a 2-tuple in `item_geometry`
doc2 = minidom.parseString(self.client.get("/feeds/rss2/").content)
feed1, feed2 = doc1.firstChild, doc2.firstChild
# Making sure the box got added to the second GeoRSS feed.
self.assertChildNodes(
feed2.getElementsByTagName("channel")[0],
[
"title",
"link",
"description",
"language",
"lastBuildDate",
"item",
"georss:box",
"atom:link",
],
)
# Incrementing through the feeds.
for feed in [feed1, feed2]:
# Ensuring the georss namespace was added to the <rss> element.
self.assertEqual(
feed.getAttribute("xmlns:georss"), "http://www.georss.org/georss"
)
chan = feed.getElementsByTagName("channel")[0]
items = chan.getElementsByTagName("item")
self.assertEqual(len(items), City.objects.count())
# Ensuring the georss element was added to each item in the feed.
for item in items:
self.assertChildNodes(
item, ["title", "link", "description", "guid", "georss:point"]
)
def test_geofeed_atom(self):
"Testing geographic feeds using GeoRSS over Atom."
doc1 = minidom.parseString(self.client.get("/feeds/atom1/").content)
doc2 = minidom.parseString(self.client.get("/feeds/atom2/").content)
feed1, feed2 = doc1.firstChild, doc2.firstChild
# Making sure the box got added to the second GeoRSS feed.
self.assertChildNodes(
feed2, ["title", "link", "id", "updated", "entry", "georss:box"]
)
for feed in [feed1, feed2]:
# Ensuring the georsss namespace was added to the <feed> element.
self.assertEqual(
feed.getAttribute("xmlns:georss"), "http://www.georss.org/georss"
)
entries = feed.getElementsByTagName("entry")
self.assertEqual(len(entries), City.objects.count())
# Ensuring the georss element was added to each entry in the feed.
for entry in entries:
self.assertChildNodes(
entry, ["title", "link", "id", "summary", "georss:point"]
)
def test_geofeed_w3c(self):
"Testing geographic feeds using W3C Geo."
doc = minidom.parseString(self.client.get("/feeds/w3cgeo1/").content)
feed = doc.firstChild
# Ensuring the geo namespace was added to the <feed> element.
self.assertEqual(
feed.getAttribute("xmlns:geo"), "http://www.w3.org/2003/01/geo/wgs84_pos#"
)
chan = feed.getElementsByTagName("channel")[0]
items = chan.getElementsByTagName("item")
self.assertEqual(len(items), City.objects.count())
# Ensuring the geo:lat and geo:lon element was added to each item in
# the feed.
for item in items:
self.assertChildNodes(
item, ["title", "link", "description", "guid", "geo:lat", "geo:lon"]
)
# Boxes and Polygons aren't allowed in W3C Geo feeds.
with self.assertRaises(ValueError): # Box in <channel>
self.client.get("/feeds/w3cgeo2/")
with self.assertRaises(ValueError): # Polygons in <entry>
self.client.get("/feeds/w3cgeo3/")
| GeoFeedTest |
python | realpython__materials | python-protocol/linked_list.py | {
"start": 213,
"end": 647
} | class ____:
def __init__(
self,
value: int,
next_node: Optional["LinkedListNode"] = None,
):
self.value = value
self.next_node = next_node
def __str__(self) -> str:
return f"{self.value} -> {self.next_node}"
def print_linked_list(start_node: LinkedListNode):
print(start_node)
node3 = Node(3)
node2 = Node(2, node3)
node1 = Node(1, node2)
print_linked_list(node1)
| Node |
python | encode__django-rest-framework | rest_framework/metadata.py | {
"start": 633,
"end": 918
} | class ____:
def determine_metadata(self, request, view):
"""
Return a dictionary of metadata about the view.
Used to return responses for OPTIONS requests.
"""
raise NotImplementedError(".determine_metadata() must be overridden.")
| BaseMetadata |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 219926,
"end": 221392
} | class ____(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
| TIPCThreadableTest |
python | openai__gym | tests/wrappers/test_nested_dict.py | {
"start": 2987,
"end": 4067
} | class ____:
@pytest.mark.parametrize("observation_space, flat_shape", NESTED_DICT_TEST_CASES)
def test_nested_dicts_size(self, observation_space, flat_shape):
env = FakeEnvironment(observation_space=observation_space)
# Make sure we are testing the right environment for the test.
observation_space = env.observation_space
assert isinstance(observation_space, Dict)
wrapped_env = FlattenObservation(FilterObservation(env, env.obs_keys))
assert wrapped_env.observation_space.shape == flat_shape
assert wrapped_env.observation_space.dtype == np.float32
@pytest.mark.parametrize("observation_space, flat_shape", NESTED_DICT_TEST_CASES)
def test_nested_dicts_ravel(self, observation_space, flat_shape):
env = FakeEnvironment(observation_space=observation_space)
wrapped_env = FlattenObservation(FilterObservation(env, env.obs_keys))
obs, info = wrapped_env.reset()
assert obs.shape == wrapped_env.observation_space.shape
assert isinstance(info, dict)
| TestNestedDictWrapper |
python | getsentry__sentry | src/sentry/auth/services/auth/model.py | {
"start": 7577,
"end": 7729
} | class ____(RpcModel):
organization_id: int = -1
auth_provider: RpcAuthProvider | None = None
has_api_key: bool = False
| RpcOrganizationAuthConfig |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1007502,
"end": 1007710
} | class ____(VegaLiteSchema):
"""RangeScheme schema wrapper."""
_schema = {"$ref": "#/definitions/RangeScheme"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| RangeScheme |
python | pypa__warehouse | tests/unit/oidc/models/test_activestate.py | {
"start": 12803,
"end": 14361
} | class ____:
def test_reify_does_not_exist_yet(self, db_request):
pending_publisher: PendingActiveStatePublisher = (
PendingActiveStatePublisherFactory.create()
)
assert (
db_request.db.query(ActiveStatePublisher)
.filter_by(
organization=pending_publisher.organization,
activestate_project_name=pending_publisher.activestate_project_name,
actor_id=pending_publisher.actor_id,
actor=pending_publisher.actor,
)
.one_or_none()
is None
)
publisher = pending_publisher.reify(db_request.db)
assert isinstance(publisher, ActiveStatePublisher)
assert pending_publisher in db_request.db.deleted
assert publisher.organization == pending_publisher.organization
assert publisher.sub == pending_publisher.sub
def test_reify_already_exists(self, db_request):
existing_publisher: ActiveStatePublisher = ActiveStatePublisherFactory.create()
pending_publisher = PendingActiveStatePublisherFactory.create(
organization=existing_publisher.organization,
activestate_project_name=existing_publisher.activestate_project_name,
actor_id=existing_publisher.actor_id,
actor=existing_publisher.actor,
)
publisher = pending_publisher.reify(db_request.db)
assert existing_publisher == publisher
assert pending_publisher in db_request.db.deleted
| TestPendingActiveStatePublisher |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/hpcviewer/package.py | {
"start": 227,
"end": 609
} | class ____(AutotoolsPackage):
"""Uses version-test-pkg, as a build dependency"""
homepage = "http://www.spack.org"
url = "http://www.spack.org/downloads/aml-1.0.tar.gz"
version("2019.02", md5="0123456789abcdef0123456789abcdef")
depends_on("java@11:", type=("build", "run"), when="@2021.0:")
depends_on("java@8", type=("build", "run"), when="@:2020")
| Hpcviewer |
python | jazzband__django-oauth-toolkit | oauth2_provider/validators.py | {
"start": 611,
"end": 5549
} | class ____(URIValidator):
# TODO: find a way to get these associated with their form fields in place of passing name
# TODO: submit PR to get `cause` included in the parent class ValidationError params`
def __init__(
self,
schemes,
name,
allow_path=False,
allow_query=False,
allow_fragments=False,
allow_hostname_wildcard=False,
):
"""
:param schemes: List of allowed schemes. E.g.: ["https"]
:param name: Name of the validated URI. It is required for validation message. E.g.: "Origin"
:param allow_path: If URI can contain path part
:param allow_query: If URI can contain query part
:param allow_fragments: If URI can contain fragments part
"""
super().__init__(schemes=schemes)
self.name = name
self.allow_path = allow_path
self.allow_query = allow_query
self.allow_fragments = allow_fragments
self.allow_hostname_wildcard = allow_hostname_wildcard
def __call__(self, value):
value = force_str(value)
try:
scheme, netloc, path, query, fragment = urlsplit(value)
except ValueError as e:
raise ValidationError(
"%(name)s URI validation error. %(cause)s: %(value)s",
params={"name": self.name, "value": value, "cause": e},
)
# send better validation errors
if scheme not in self.schemes:
raise ValidationError(
"%(name)s URI Validation error. %(cause)s: %(value)s",
params={"name": self.name, "value": value, "cause": "invalid_scheme"},
)
if query and not self.allow_query:
raise ValidationError(
"%(name)s URI validation error. %(cause)s: %(value)s",
params={"name": self.name, "value": value, "cause": "query string not allowed"},
)
if fragment and not self.allow_fragments:
raise ValidationError(
"%(name)s URI validation error. %(cause)s: %(value)s",
params={"name": self.name, "value": value, "cause": "fragment not allowed"},
)
if path and not self.allow_path:
raise ValidationError(
"%(name)s URI validation error. %(cause)s: %(value)s",
params={"name": self.name, "value": value, "cause": "path not allowed"},
)
if self.allow_hostname_wildcard and "*" in netloc:
domain_parts = netloc.split(".")
if netloc.count("*") > 1:
raise ValidationError(
"%(name)s URI validation error. %(cause)s: %(value)s",
params={
"name": self.name,
"value": value,
"cause": "only one wildcard is allowed in the hostname",
},
)
if not netloc.startswith("*"):
raise ValidationError(
"%(name)s URI validation error. %(cause)s: %(value)s",
params={
"name": self.name,
"value": value,
"cause": "wildcards must be at the beginning of the hostname",
},
)
if len(domain_parts) < 3:
raise ValidationError(
"%(name)s URI validation error. %(cause)s: %(value)s",
params={
"name": self.name,
"value": value,
"cause": "wildcards cannot be in the top level or second level domain",
},
)
# strip the wildcard from the netloc, we'll reassamble the value later to pass to URI Validator
if netloc.startswith("*."):
netloc = netloc[2:]
else:
netloc = netloc[1:]
# domains cannot start with a hyphen, but can have them in the middle, so we strip hyphens
# after the wildcard so the final domain is valid and will succeed in URIVAlidator
if netloc.startswith("-"):
netloc = netloc[1:]
# we stripped the wildcard from the netloc and path if they were allowed and present since they would
# fail validation we'll reassamble the URI to pass to the URIValidator
reassambled_uri = f"{scheme}://{netloc}{path}"
if query:
reassambled_uri += f"?{query}"
if fragment:
reassambled_uri += f"#{fragment}"
try:
super().__call__(reassambled_uri)
except ValidationError as e:
raise ValidationError(
"%(name)s URI validation error. %(cause)s: %(value)s",
params={"name": self.name, "value": value, "cause": e},
)
| AllowedURIValidator |
python | astropy__astropy | astropy/modeling/physical_models.py | {
"start": 461,
"end": 8968
} | class ____(Fittable1DModel):
"""
Blackbody model using the Planck function.
Parameters
----------
temperature : `~astropy.units.Quantity` ['temperature']
Blackbody temperature.
scale : float or `~astropy.units.Quantity` ['dimensionless']
Scale factor. If dimensionless, input units will assumed
to be in Hz and output units in (erg / (cm ** 2 * s * Hz * sr).
If not dimensionless, must be equivalent to either
(erg / (cm ** 2 * s * Hz * sr) or erg / (cm ** 2 * s * AA * sr),
in which case the result will be returned in the requested units and
the scale will be stripped of units (with the float value applied).
Notes
-----
Model formula:
.. math:: B_{\\nu}(T) = A \\frac{2 h \\nu^{3} / c^{2}}{exp(h \\nu / k T) - 1}
Examples
--------
>>> from astropy.modeling import models
>>> from astropy import units as u
>>> bb = models.BlackBody(temperature=5000*u.K)
>>> bb(6000 * u.AA) # doctest: +FLOAT_CMP
<Quantity 1.53254685e-05 erg / (Hz s sr cm2)>
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import BlackBody
from astropy import units as u
from astropy.visualization import quantity_support
bb = BlackBody(temperature=5778*u.K)
wav = np.arange(1000, 110000) * u.AA
flux = bb(wav)
with quantity_support():
plt.figure()
plt.semilogx(wav, flux)
plt.axvline(bb.nu_max.to(u.AA, equivalencies=u.spectral()).value, ls='--')
plt.show()
"""
# We parametrize this model with a temperature and a scale.
temperature = Parameter(
default=5000.0, min=0, unit=u.K, description="Blackbody temperature"
)
scale = Parameter(default=1.0, min=0, description="Scale factor")
# We allow values without units to be passed when evaluating the model, and
# in this case the input x values are assumed to be frequencies in Hz or wavelengths
# in AA (depending on the choice of output units controlled by units on scale
# and stored in self._output_units during init).
_input_units_allow_dimensionless = True
# We enable the spectral equivalency by default for the spectral axis
input_units_equivalencies = {"x": u.spectral()}
# Store the native units returned by B_nu equation
_native_units = u.erg / (u.cm**2 * u.s * u.Hz * u.sr)
# Store the base native output units. If scale is not dimensionless, it
# must be equivalent to one of these. If equivalent to SLAM, then
# input_units will expect AA for 'x', otherwise Hz.
_native_output_units = {
"SNU": u.erg / (u.cm**2 * u.s * u.Hz * u.sr),
"SLAM": u.erg / (u.cm**2 * u.s * u.AA * u.sr),
}
def __init__(self, *args, **kwargs):
scale = kwargs.get("scale")
# Support scale with non-dimensionless unit by stripping the unit and
# storing as self._output_units.
if hasattr(scale, "unit") and not scale.unit.is_equivalent(
u.dimensionless_unscaled
):
output_units = scale.unit
if not output_units.is_equivalent(
self._native_units, u.spectral_density(1 * u.AA)
):
raise ValueError(
"scale units not dimensionless or in "
f"surface brightness: {output_units}"
)
kwargs["scale"] = scale.value
self._output_units = output_units
else:
self._output_units = self._native_units
super().__init__(*args, **kwargs)
def evaluate(self, x, temperature, scale):
"""Evaluate the model.
Parameters
----------
x : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['frequency']
Frequency at which to compute the blackbody. If no units are given,
this defaults to Hz (or AA if `scale` was initialized with units
equivalent to erg / (cm ** 2 * s * AA * sr)).
temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Temperature of the blackbody. If no units are given, this defaults
to Kelvin.
scale : float, `~numpy.ndarray`, or `~astropy.units.Quantity` ['dimensionless']
Desired scale for the blackbody.
Returns
-------
y : number or ndarray
Blackbody spectrum. The units are determined from the units of
``scale``.
.. note::
Use `numpy.errstate` to suppress Numpy warnings, if desired.
.. warning::
Output values might contain ``nan`` and ``inf``.
Raises
------
ValueError
Invalid temperature.
ZeroDivisionError
Wavelength is zero (when converting to frequency).
"""
if not isinstance(temperature, u.Quantity):
in_temp = u.Quantity(temperature, u.K)
else:
in_temp = temperature
if not isinstance(x, u.Quantity):
# then we assume it has input_units which depends on the
# requested output units (either Hz or AA)
in_x = u.Quantity(x, self.input_units["x"])
else:
in_x = x
# Convert to units for calculations, also force double precision
with u.add_enabled_equivalencies(u.spectral() + u.temperature()):
freq = u.Quantity(in_x, u.Hz, dtype=np.float64)
temp = u.Quantity(in_temp, u.K)
# Check if input values are physically possible
if np.any(temp < 0):
raise ValueError(f"Temperature should be positive: {temp}")
if not np.all(np.isfinite(freq)) or np.any(freq <= 0):
warnings.warn(
"Input contains invalid wavelength/frequency value(s)",
AstropyUserWarning,
)
log_boltz = const.h * freq / (const.k_B * temp)
boltzm1 = np.expm1(log_boltz)
# Calculate blackbody flux
bb_nu = 2.0 * const.h * freq**3 / (const.c**2 * boltzm1) / u.sr
if self.scale.unit is not None:
# Will be dimensionless at this point, but may not be dimensionless_unscaled
if not hasattr(scale, "unit"):
# during fitting, scale will be passed without units
# but we still need to convert from the input dimensionless
# to dimensionless unscaled
scale = scale * self.scale.unit
scale = scale.to(u.dimensionless_unscaled).value
# NOTE: scale is already stripped of any input units
y = scale * bb_nu.to(self._output_units, u.spectral_density(freq))
# If the temperature parameter has no unit, we should return a unitless
# value. This occurs for instance during fitting, since we drop the
# units temporarily.
if hasattr(temperature, "unit"):
return y
return y.value
@property
def input_units(self):
# The input units are those of the 'x' value, which will depend on the
# units compatible with the expected output units.
if self._output_units.is_equivalent(self._native_output_units["SNU"]):
return {self.inputs[0]: u.Hz}
else:
# only other option is equivalent with SLAM
return {self.inputs[0]: u.AA}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {"temperature": u.K}
@property
def bolometric_flux(self):
"""Bolometric flux."""
if self.scale.unit is not None:
# Will be dimensionless at this point, but may not be dimensionless_unscaled
scale = self.scale.quantity.to(u.dimensionless_unscaled)
else:
scale = self.scale.value
# bolometric flux in the native units of the planck function
native_bolflux = scale * const.sigma_sb * self.temperature**4 / np.pi
# return in more "astro" units
return native_bolflux.to(u.erg / (u.cm**2 * u.s))
@property
def lambda_max(self):
"""Peak wavelength when the curve is expressed as power density."""
return const.b_wien / self.temperature
@property
def nu_max(self):
"""Peak frequency when the curve is expressed as power density."""
return 2.8214391 * const.k_B * self.temperature / const.h
| BlackBody |
python | pola-rs__polars | py-polars/src/polars/series/ext.py | {
"start": 399,
"end": 1525
} | class ____:
"""Series.ext namespace."""
_accessor = "ext"
def __init__(self, series: Series) -> None:
self._s: PySeries = series._s
@unstable()
def to(self, dtype: PolarsDataType) -> Series:
"""
Create a Series with an extension `dtype`.
The input series must have the storage type of the extension dtype.
.. warning::
This functionality is currently considered **unstable**. It may be
changed at any point without it being considered a breaking change.
"""
assert isinstance(dtype, dt.BaseExtension)
return wrap_s(self._s.ext_to(dtype))
@unstable()
def storage(self) -> Series:
"""
Get the storage values of a Series with an extension data type.
If the input series does not have an extension data type, it is returned as-is.
.. warning::
This functionality is currently considered **unstable**. It may be
changed at any point without it being considered a breaking change.
"""
return wrap_s(self._s.ext_storage())
| ExtensionNameSpace |
python | facelessuser__soupsieve | tests/util.py | {
"start": 778,
"end": 4232
} | class ____(unittest.TestCase):
"""Test case."""
def wrap_xhtml(self, html):
"""Wrap HTML content with XHTML header and body."""
return f"""
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head>
</head>
<body>
{html}
</body>
</html>
"""
def setUp(self):
"""Setup."""
sv.purge()
def purge(self):
"""Purge cache."""
sv.purge()
def compile_pattern(self, selectors, namespaces=None, custom=None, flags=0):
"""Compile pattern."""
print('PATTERN: ', selectors)
flags |= sv.DEBUG
return sv.compile(selectors, namespaces=namespaces, custom=custom, flags=flags)
def soup(self, markup, parser):
"""Get soup."""
print('\n====PARSER: ', parser)
return bs4.BeautifulSoup(textwrap.dedent(markup.replace('\r\n', '\n')), parser)
def get_parsers(self, flags):
"""Get parsers."""
mode = flags & 0x3F
if mode == HTML:
parsers = ('html5lib', 'lxml', 'html.parser')
elif mode == PYHTML:
parsers = ('html.parser',)
elif mode == LXML_HTML:
parsers = ('lxml',)
elif mode in (HTML5, 0):
parsers = ('html5lib',)
elif mode in (XHTML, XML):
parsers = ('xml',)
return parsers
def assert_raises(self, pattern, exception, namespace=None, custom=None):
"""Assert raises."""
print('----Running Assert Test----')
with self.assertRaises(exception):
self.compile_pattern(pattern, namespaces=namespace, custom=custom)
def assert_selector(self, markup, selectors, expected_ids, namespaces=None, custom=None, flags=0):
"""Assert selector."""
if namespaces is None:
namespaces = {}
parsers = self.get_parsers(flags)
print('----Running Selector Test----')
selector = self.compile_pattern(selectors, namespaces, custom)
for parser in available_parsers(*parsers):
soup = self.soup(markup, parser)
# print(soup)
ids = []
for el in selector.select(soup):
print('TAG: ', el.name)
ids.append(el.attrs['id'])
self.assertEqual(sorted(ids), sorted(expected_ids))
def available_parsers(*parsers):
"""
Filter a list of parsers, down to the available ones.
If there are none, report the test as skipped to pytest.
"""
ran_test = False
for parser in parsers:
if (
(parser in ('xml', 'lxml') and not LXML_PRESENT) or
(parser == 'html5lib' and not HTML5LIB_PRESENT)
):
print(f'SKIPPED {parser}, not installed')
else:
ran_test = True
yield parser
if not ran_test:
raise pytest.skip('no available parsers')
def requires_lxml(test):
"""Decorator that marks a test as requiring LXML."""
return pytest.mark.skipif(
not LXML_PRESENT, reason='test requires lxml')(test)
def requires_html5lib(test):
"""Decorator that marks a test as requiring html5lib."""
return pytest.mark.skipif(
not HTML5LIB_PRESENT, reason='test requires html5lib')(test)
| TestCase |
python | sympy__sympy | sympy/polys/matrices/exceptions.py | {
"start": 1066,
"end": 1351
} | class ____(DMError):
"""The value passed is invalid"""
pass
__all__ = [
'DMError', 'DMBadInputError', 'DMDomainError', 'DMFormatError',
'DMRankError', 'DMShapeError', 'DMNotAField',
'DMNonInvertibleMatrixError', 'DMNonSquareMatrixError', 'DMValueError'
]
| DMValueError |
python | davidhalter__jedi | jedi/inference/signature.py | {
"start": 3960,
"end": 4635
} | class ____(AbstractSignature):
def __init__(self, value, return_string, function_value=None, is_bound=False):
super().__init__(value, is_bound)
self._return_string = return_string
self.__function_value = function_value
@property
def annotation_string(self):
return self._return_string
@property
def _function_value(self):
if self.__function_value is None:
return self.value
return self.__function_value
def bind(self, value):
return BuiltinSignature(
value, self._return_string,
function_value=self.value,
is_bound=True
)
| BuiltinSignature |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/waiters/test_glue.py | {
"start": 3111,
"end": 4529
} | class ____(TestGlueDataQualityCustomWaitersBase):
WAITER_NAME = "data_quality_rule_recommendation_run_complete"
@pytest.fixture
def mock_get_job(self):
with mock.patch.object(self.client, "get_data_quality_rule_recommendation_run") as mock_getter:
yield mock_getter
@pytest.mark.parametrize("state", GlueDataQualityRuleRecommendationRunSensor.SUCCESS_STATES)
def test_data_quality_rule_recommendation_run_complete(self, state, mock_get_job):
mock_get_job.return_value = {"Status": state}
GlueDataQualityHook().get_waiter(self.WAITER_NAME).wait(RunId="run_id")
@pytest.mark.parametrize("state", GlueDataQualityRuleRecommendationRunSensor.FAILURE_STATES)
def test_data_quality_rule_recommendation_run_failed(self, state, mock_get_job):
mock_get_job.return_value = {"Status": state}
with pytest.raises(botocore.exceptions.WaiterError):
GlueDataQualityHook().get_waiter(self.WAITER_NAME).wait(RunId="run_id")
def test_data_quality_rule_recommendation_run_wait(self, mock_get_job):
wait = {"Status": "RUNNING"}
success = {"Status": "SUCCEEDED"}
mock_get_job.side_effect = [wait, wait, success]
GlueDataQualityHook().get_waiter(self.WAITER_NAME).wait(
RunIc="run_id", WaiterConfig={"Delay": 0.01, "MaxAttempts": 3}
)
| TestGlueDataQualityRuleRecommendationRunCompleteWaiter |
python | pypa__setuptools | setuptools/tests/test_setuptools.py | {
"start": 3797,
"end": 9008
} | class ____:
def setup_method(self, method):
self.e1 = Extension('bar.ext', ['bar.c'])
self.e2 = Extension('c.y', ['y.c'])
self.dist = makeSetup(
packages=['a', 'a.b', 'a.b.c', 'b', 'c'],
py_modules=['b.d', 'x'],
ext_modules=(self.e1, self.e2),
package_dir={},
)
def testDistroType(self):
assert isinstance(self.dist, setuptools.dist.Distribution)
def testExcludePackage(self):
self.dist.exclude_package('a')
assert self.dist.packages == ['b', 'c']
self.dist.exclude_package('b')
assert self.dist.packages == ['c']
assert self.dist.py_modules == ['x']
assert self.dist.ext_modules == [self.e1, self.e2]
self.dist.exclude_package('c')
assert self.dist.packages == []
assert self.dist.py_modules == ['x']
assert self.dist.ext_modules == [self.e1]
# test removals from unspecified options
makeSetup().exclude_package('x')
def testIncludeExclude(self):
# remove an extension
self.dist.exclude(ext_modules=[self.e1])
assert self.dist.ext_modules == [self.e2]
# add it back in
self.dist.include(ext_modules=[self.e1])
assert self.dist.ext_modules == [self.e2, self.e1]
# should not add duplicate
self.dist.include(ext_modules=[self.e1])
assert self.dist.ext_modules == [self.e2, self.e1]
def testExcludePackages(self):
self.dist.exclude(packages=['c', 'b', 'a'])
assert self.dist.packages == []
assert self.dist.py_modules == ['x']
assert self.dist.ext_modules == [self.e1]
def testEmpty(self):
dist = makeSetup()
dist.include(packages=['a'], py_modules=['b'], ext_modules=[self.e2])
dist = makeSetup()
dist.exclude(packages=['a'], py_modules=['b'], ext_modules=[self.e2])
def testContents(self):
assert self.dist.has_contents_for('a')
self.dist.exclude_package('a')
assert not self.dist.has_contents_for('a')
assert self.dist.has_contents_for('b')
self.dist.exclude_package('b')
assert not self.dist.has_contents_for('b')
assert self.dist.has_contents_for('c')
self.dist.exclude_package('c')
assert not self.dist.has_contents_for('c')
def testInvalidIncludeExclude(self):
with pytest.raises(DistutilsSetupError):
self.dist.include(nonexistent_option='x')
with pytest.raises(DistutilsSetupError):
self.dist.exclude(nonexistent_option='x')
with pytest.raises(DistutilsSetupError):
self.dist.include(packages={'x': 'y'})
with pytest.raises(DistutilsSetupError):
self.dist.exclude(packages={'x': 'y'})
with pytest.raises(DistutilsSetupError):
self.dist.include(ext_modules={'x': 'y'})
with pytest.raises(DistutilsSetupError):
self.dist.exclude(ext_modules={'x': 'y'})
with pytest.raises(DistutilsSetupError):
self.dist.include(package_dir=['q'])
with pytest.raises(DistutilsSetupError):
self.dist.exclude(package_dir=['q'])
@pytest.fixture
def example_source(tmpdir):
tmpdir.mkdir('foo')
(tmpdir / 'foo/bar.py').write('')
(tmpdir / 'readme.txt').write('')
return tmpdir
def test_findall(example_source):
found = list(setuptools.findall(str(example_source)))
expected = ['readme.txt', 'foo/bar.py']
expected = [example_source.join(fn) for fn in expected]
assert found == expected
def test_findall_curdir(example_source):
with example_source.as_cwd():
found = list(setuptools.findall())
expected = ['readme.txt', os.path.join('foo', 'bar.py')]
assert found == expected
@pytest.fixture
def can_symlink(tmpdir):
"""
Skip if cannot create a symbolic link
"""
link_fn = 'link'
target_fn = 'target'
try:
os.symlink(target_fn, link_fn)
except (OSError, NotImplementedError, AttributeError):
pytest.skip("Cannot create symbolic links")
os.remove(link_fn)
@pytest.mark.usefixtures("can_symlink")
def test_findall_missing_symlink(tmpdir):
with tmpdir.as_cwd():
os.symlink('foo', 'bar')
found = list(setuptools.findall())
assert found == []
@pytest.mark.xfail(reason="unable to exclude tests; #4475 #3260")
def test_its_own_wheel_does_not_contain_tests(setuptools_wheel):
with ZipFile(setuptools_wheel) as zipfile:
contents = [f.replace(os.sep, '/') for f in zipfile.namelist()]
for member in contents:
assert '/tests/' not in member
def test_wheel_includes_cli_scripts(setuptools_wheel):
with ZipFile(setuptools_wheel) as zipfile:
contents = [f.replace(os.sep, '/') for f in zipfile.namelist()]
assert any('cli-64.exe' in member for member in contents)
def test_wheel_includes_vendored_metadata(setuptools_wheel):
with ZipFile(setuptools_wheel) as zipfile:
contents = [f.replace(os.sep, '/') for f in zipfile.namelist()]
assert any(
re.search(r'_vendor/.*\.dist-info/METADATA', member) for member in contents
)
| TestDistro |
python | scipy__scipy | scipy/optimize/_differentiable_functions.py | {
"start": 16718,
"end": 18217
} | class ____:
"""
Wrapper class for Jacobian calculation
"""
def __init__(
self,
hess,
jac=None,
finite_diff_options=None,
):
self.jac = jac
self.hess = hess
self.finite_diff_options = finite_diff_options
self.nhev = 0
# number of jac evaluations consumed by finite difference
self.njev = 0
def __call__(self, x, v, J0=None, **kwds):
# Send a copy because the user may overwrite it.
# The user of this class might want `x` to remain unchanged.
if callable(self.hess):
self.nhev += 1
return self._callable_hess(x, v)
elif self.hess in FD_METHODS:
return self._fd_hess(x, v, J0=J0)
def _fd_hess(self, x, v, J0=None):
if J0 is None:
J0 = self.jac(x)
self.njev += 1
# H will be a LinearOperator
H = approx_derivative(self.jac_dot_v, x,
f0=J0.T.dot(v),
args=(v,),
**self.finite_diff_options)
return H
def jac_dot_v(self, x, v):
self.njev += 1
return self.jac(x).T.dot(v)
def _callable_hess(self, x, v):
H = self.hess(x, v)
if sps.issparse(H):
return sps.csr_array(H)
elif isinstance(H, LinearOperator):
return H
else:
return np.atleast_2d(np.asarray(H))
| _VectorHessWrapper |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_gtk4.py | {
"start": 1401,
"end": 12421
} | class ____(_FigureCanvasGTK, Gtk.DrawingArea):
required_interactive_framework = "gtk4"
supports_blit = False
manager_class = _api.classproperty(lambda cls: FigureManagerGTK4)
def __init__(self, figure=None):
super().__init__(figure=figure)
self.set_hexpand(True)
self.set_vexpand(True)
self._idle_draw_id = 0
self._rubberband_rect = None
self.set_draw_func(self._draw_func)
self.connect('resize', self.resize_event)
if _GTK_GE_4_12:
self.connect('realize', self._realize_event)
else:
self.connect('notify::scale-factor', self._update_device_pixel_ratio)
click = Gtk.GestureClick()
click.set_button(0) # All buttons.
click.connect('pressed', self.button_press_event)
click.connect('released', self.button_release_event)
self.add_controller(click)
key = Gtk.EventControllerKey()
key.connect('key-pressed', self.key_press_event)
key.connect('key-released', self.key_release_event)
self.add_controller(key)
motion = Gtk.EventControllerMotion()
motion.connect('motion', self.motion_notify_event)
motion.connect('enter', self.enter_notify_event)
motion.connect('leave', self.leave_notify_event)
self.add_controller(motion)
scroll = Gtk.EventControllerScroll.new(
Gtk.EventControllerScrollFlags.VERTICAL)
scroll.connect('scroll', self.scroll_event)
self.add_controller(scroll)
self.set_focusable(True)
css = Gtk.CssProvider()
style = '.matplotlib-canvas { background-color: white; }'
if Gtk.check_version(4, 9, 3) is None:
css.load_from_data(style, -1)
else:
css.load_from_data(style.encode('utf-8'))
style_ctx = self.get_style_context()
style_ctx.add_provider(css, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
style_ctx.add_class("matplotlib-canvas")
def destroy(self):
CloseEvent("close_event", self)._process()
def set_cursor(self, cursor):
# docstring inherited
self.set_cursor_from_name(_backend_gtk.mpl_to_gtk_cursor_name(cursor))
def _mpl_coords(self, xy=None):
"""
Convert the *xy* position of a GTK event, or of the current cursor
position if *xy* is None, to Matplotlib coordinates.
GTK use logical pixels, but the figure is scaled to physical pixels for
rendering. Transform to physical pixels so that all of the down-stream
transforms work as expected.
Also, the origin is different and needs to be corrected.
"""
if xy is None:
surface = self.get_native().get_surface()
is_over, x, y, mask = surface.get_device_position(
self.get_display().get_default_seat().get_pointer())
else:
x, y = xy
x = x * self.device_pixel_ratio
# flip y so y=0 is bottom of canvas
y = self.figure.bbox.height - y * self.device_pixel_ratio
return x, y
def scroll_event(self, controller, dx, dy):
MouseEvent(
"scroll_event", self, *self._mpl_coords(), step=dy,
modifiers=self._mpl_modifiers(controller),
guiEvent=controller.get_current_event() if _GOBJECT_GE_3_47 else None,
)._process()
return True
def button_press_event(self, controller, n_press, x, y):
MouseEvent(
"button_press_event", self, *self._mpl_coords((x, y)),
controller.get_current_button(),
modifiers=self._mpl_modifiers(controller),
guiEvent=controller.get_current_event() if _GOBJECT_GE_3_47 else None,
)._process()
self.grab_focus()
def button_release_event(self, controller, n_press, x, y):
MouseEvent(
"button_release_event", self, *self._mpl_coords((x, y)),
controller.get_current_button(),
modifiers=self._mpl_modifiers(controller),
guiEvent=controller.get_current_event() if _GOBJECT_GE_3_47 else None,
)._process()
def key_press_event(self, controller, keyval, keycode, state):
KeyEvent(
"key_press_event", self, self._get_key(keyval, keycode, state),
*self._mpl_coords(),
guiEvent=controller.get_current_event() if _GOBJECT_GE_3_47 else None,
)._process()
return True
def key_release_event(self, controller, keyval, keycode, state):
KeyEvent(
"key_release_event", self, self._get_key(keyval, keycode, state),
*self._mpl_coords(),
guiEvent=controller.get_current_event() if _GOBJECT_GE_3_47 else None,
)._process()
return True
def motion_notify_event(self, controller, x, y):
MouseEvent(
"motion_notify_event", self, *self._mpl_coords((x, y)),
buttons=self._mpl_buttons(controller),
modifiers=self._mpl_modifiers(controller),
guiEvent=controller.get_current_event() if _GOBJECT_GE_3_47 else None,
)._process()
def enter_notify_event(self, controller, x, y):
LocationEvent(
"figure_enter_event", self, *self._mpl_coords((x, y)),
modifiers=self._mpl_modifiers(),
guiEvent=controller.get_current_event() if _GOBJECT_GE_3_47 else None,
)._process()
def leave_notify_event(self, controller):
LocationEvent(
"figure_leave_event", self, *self._mpl_coords(),
modifiers=self._mpl_modifiers(),
guiEvent=controller.get_current_event() if _GOBJECT_GE_3_47 else None,
)._process()
def resize_event(self, area, width, height):
self._update_device_pixel_ratio()
dpi = self.figure.dpi
winch = width * self.device_pixel_ratio / dpi
hinch = height * self.device_pixel_ratio / dpi
self.figure.set_size_inches(winch, hinch, forward=False)
ResizeEvent("resize_event", self)._process()
self.draw_idle()
def _mpl_buttons(self, controller):
# NOTE: This spews "Broken accounting of active state" warnings on
# right click on macOS.
surface = self.get_native().get_surface()
is_over, x, y, event_state = surface.get_device_position(
self.get_display().get_default_seat().get_pointer())
# NOTE: alternatively we could use
# event_state = controller.get_current_event_state()
# but for button_press/button_release this would report the state
# *prior* to the event rather than after it; the above reports the
# state *after* it.
mod_table = [
(MouseButton.LEFT, Gdk.ModifierType.BUTTON1_MASK),
(MouseButton.MIDDLE, Gdk.ModifierType.BUTTON2_MASK),
(MouseButton.RIGHT, Gdk.ModifierType.BUTTON3_MASK),
(MouseButton.BACK, Gdk.ModifierType.BUTTON4_MASK),
(MouseButton.FORWARD, Gdk.ModifierType.BUTTON5_MASK),
]
return {name for name, mask in mod_table if event_state & mask}
def _mpl_modifiers(self, controller=None):
if controller is None:
surface = self.get_native().get_surface()
is_over, x, y, event_state = surface.get_device_position(
self.get_display().get_default_seat().get_pointer())
else:
event_state = controller.get_current_event_state()
mod_table = [
("ctrl", Gdk.ModifierType.CONTROL_MASK),
("alt", Gdk.ModifierType.ALT_MASK),
("shift", Gdk.ModifierType.SHIFT_MASK),
("super", Gdk.ModifierType.SUPER_MASK),
]
return [name for name, mask in mod_table if event_state & mask]
def _get_key(self, keyval, keycode, state):
unikey = chr(Gdk.keyval_to_unicode(keyval))
key = cbook._unikey_or_keysym_to_mplkey(
unikey,
Gdk.keyval_name(keyval))
modifiers = [
("ctrl", Gdk.ModifierType.CONTROL_MASK, "control"),
("alt", Gdk.ModifierType.ALT_MASK, "alt"),
("shift", Gdk.ModifierType.SHIFT_MASK, "shift"),
("super", Gdk.ModifierType.SUPER_MASK, "super"),
]
mods = [
mod for mod, mask, mod_key in modifiers
if (mod_key != key and state & mask
and not (mod == "shift" and unikey.isprintable()))]
return "+".join([*mods, key])
def _realize_event(self, obj):
surface = self.get_native().get_surface()
surface.connect('notify::scale', self._update_device_pixel_ratio)
self._update_device_pixel_ratio()
def _update_device_pixel_ratio(self, *args, **kwargs):
# We need to be careful in cases with mixed resolution displays if
# device_pixel_ratio changes.
if _GTK_GE_4_12:
scale = self.get_native().get_surface().get_scale()
else:
scale = self.get_scale_factor()
assert scale is not None
if self._set_device_pixel_ratio(scale):
self.draw()
def _draw_rubberband(self, rect):
self._rubberband_rect = rect
# TODO: Only update the rubberband area.
self.queue_draw()
def _draw_func(self, drawing_area, ctx, width, height):
self.on_draw_event(self, ctx)
self._post_draw(self, ctx)
def _post_draw(self, widget, ctx):
if self._rubberband_rect is None:
return
lw = 1
dash = 3
x0, y0, w, h = (dim / self.device_pixel_ratio
for dim in self._rubberband_rect)
x1 = x0 + w
y1 = y0 + h
# Draw the lines from x0, y0 towards x1, y1 so that the
# dashes don't "jump" when moving the zoom box.
ctx.move_to(x0, y0)
ctx.line_to(x0, y1)
ctx.move_to(x0, y0)
ctx.line_to(x1, y0)
ctx.move_to(x0, y1)
ctx.line_to(x1, y1)
ctx.move_to(x1, y0)
ctx.line_to(x1, y1)
ctx.set_antialias(1)
ctx.set_line_width(lw)
ctx.set_dash((dash, dash), 0)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke_preserve()
ctx.set_dash((dash, dash), dash)
ctx.set_source_rgb(1, 1, 1)
ctx.stroke()
def on_draw_event(self, widget, ctx):
# to be overwritten by GTK4Agg or GTK4Cairo
pass
def draw(self):
# docstring inherited
if self.is_drawable():
self.queue_draw()
def draw_idle(self):
# docstring inherited
if self._idle_draw_id != 0:
return
def idle_draw(*args):
try:
self.draw()
finally:
self._idle_draw_id = 0
return False
self._idle_draw_id = GLib.idle_add(idle_draw)
def flush_events(self):
# docstring inherited
context = GLib.MainContext.default()
while context.pending():
context.iteration(True)
| FigureCanvasGTK4 |
python | mlflow__mlflow | mlflow/genai/utils/trace_utils.py | {
"start": 9080,
"end": 23601
} | class ____:
"""
A context manager to count the number of times NoOpTracer's start_span is called.
The check is done in the following steps so it doesn't have any side effects:
1. Disable tracing.
2. Patch the NoOpTracer.start_span method to count the number of times it is called.
NoOpTracer is used when tracing is disabled.
3. Call the predict function with the sample input.
4. Restore the original NoOpTracer.start_span method and re-enable tracing.
WARNING: This function is not thread-safe. We do not provide support for running
`mlflow.genai.evaluate` in multi-threaded environments.`
"""
def __init__(self):
self.count = 0
def __enter__(self):
self.original = NoOpTracer.start_span
def _patched_start_span(_self, *args, **kwargs):
self.count += 1
return self.original(_self, *args, **kwargs)
NoOpTracer.start_span = _patched_start_span
return self
def __exit__(self, exc_type, exc_value, traceback):
NoOpTracer.start_span = self.original
def is_none_or_nan(value: Any) -> bool:
"""
Checks whether a value is None or NaN.
NB: This function does not handle pandas.NA.
"""
# isinstance(value, float) check is needed to ensure that math.isnan is not called on an array.
return value is None or (isinstance(value, float) and math.isnan(value))
def _is_empty(value: Any) -> bool:
"""
Check if a value is empty (None, empty dict, empty list, empty string, etc.).
"""
if value is None:
return True
if isinstance(value, (dict, list, str)):
return len(value) == 0
return False
def parse_inputs_to_str(value: Any) -> str:
"""Parse the inputs to a string compatible with the judges API"""
if is_none_or_nan(value):
# The DBX managed backend doesn't allow empty inputs. This is
# a temporary workaround to bypass the validation.
return " "
if isinstance(value, str):
return value
value = _to_dict(value)
if (messages := value.get(_MESSAGES_KEY)) and len(messages) > 0:
contents = [m.get(_CONTENT_KEY) for m in messages]
if len(contents) > 1 and all(isinstance(c, str) for c in contents):
return json.dumps(messages)
elif isinstance(contents[-1], str):
return contents[-1]
return str(value)
def parse_outputs_to_str(value: Any) -> str:
"""Parse the outputs to a string compatible with the judges API"""
if is_none_or_nan(value):
return " "
if isinstance(value, str):
return value
# PyFuncModel.predict wraps the output in a list
if isinstance(value, list) and len(value) > 0:
return parse_outputs_to_str(value[0])
value = _to_dict(value)
if _is_chat_choices(value.get(_CHOICES_KEY)):
content = value[_CHOICES_KEY][0][_MESSAGE_KEY][_CONTENT_KEY]
elif _is_chat_messages(value.get(_MESSAGES_KEY)):
content = value[_MESSAGES_KEY][-1][_CONTENT_KEY]
else:
content = json.dumps(value, cls=TraceJSONEncoder)
return content
def _is_chat_choices(maybe_choices: Any) -> bool:
if (
not maybe_choices
or not isinstance(maybe_choices, list)
or not isinstance(maybe_choices[0], dict)
):
return False
message = maybe_choices[0].get(_MESSAGE_KEY)
return _is_chat_messages([message])
def _is_chat_messages(maybe_messages: Any) -> bool:
return (
maybe_messages
and len(maybe_messages) > 0
and isinstance(maybe_messages[-1], dict)
and isinstance(maybe_messages[-1].get(_CONTENT_KEY), str)
)
def _to_dict(obj: Any) -> dict[str, Any]:
if hasattr(obj, "to_dict"):
return obj.to_dict()
if isinstance(obj, BaseModel):
return obj.model_dump()
# Convert to JSON string and then back to dictionary to handle nested objects
json_str = json.dumps(obj, cls=TraceJSONEncoder)
return json.loads(json_str)
def extract_retrieval_context_from_trace(trace: Trace | None) -> dict[str, list[Any]]:
"""
Extract the retrieval context from the trace.
Extracts all top-level retrieval spans from the trace if there are multiple retrieval spans.
If the trace does not have a retrieval span, return an empty dictionary.
⚠️ Warning: Please make sure to not throw exception. If fails, return an empty dictionary.
"""
if trace is None or trace.data is None:
return {}
top_level_retrieval_spans = _get_top_level_retrieval_spans(trace)
if len(top_level_retrieval_spans) == 0:
return {}
retrieved = {}
for retrieval_span in top_level_retrieval_spans:
try:
contexts = [_parse_chunk(chunk) for chunk in retrieval_span.outputs or []]
retrieved[retrieval_span.span_id] = [c for c in contexts if c is not None]
except Exception as e:
_logger.debug(
f"Fail to get retrieval context from span: {retrieval_span}. Error: {e!r}"
)
return retrieved
def _get_top_level_retrieval_spans(trace: Trace) -> list[Span]:
"""
Get the top-level retrieval spans in the trace.
Top-level retrieval spans are retrieval spans that are not children of other retrieval spans.
For example, given the following spans:
- Span A (Chain)
- Span B (Retriever)
- Span C (Retriever)
- Span D (Retriever)
- Span E (LLM)
- Span F (Retriever)
Span B and Span D are top-level retrieval spans.
Span C and Span F are NOT top-level because they are children of other retrieval spans.
"""
top_level_retrieval_spans = []
# Cache span_id -> span mapping for fast lookup
all_spans = {span.span_id: span for span in trace.data.spans}
for span in trace.search_spans(span_type=SpanType.RETRIEVER):
# Check if this span is a child of another retrieval span
parent_id = span.parent_id
while parent_id:
parent_span = all_spans.get(parent_id)
if not parent_span:
# Malformed trace
_logger.debug(
f"Malformed trace: span {span} has parent span ID {parent_id}, "
"but the parent span is not found in the trace."
)
break
if parent_span.span_type == SpanType.RETRIEVER:
# This span is a child of another retrieval span
break
parent_id = parent_span.parent_id
else:
top_level_retrieval_spans.append(span)
return top_level_retrieval_spans
def _parse_chunk(chunk: Any) -> dict[str, Any] | None:
if not isinstance(chunk, dict):
return None
doc = {"content": chunk.get("page_content")}
if doc_uri := chunk.get("metadata", {}).get("doc_uri"):
doc["doc_uri"] = doc_uri
return doc
def clean_up_extra_traces(traces: list[Trace], eval_start_time: int) -> list[Trace]:
"""
Clean up noisy traces generated outside predict function.
Evaluation run should only contain traces that is being evaluated or generated by the predict
function. If not, the result will not show the correct list of traces.
Sometimes, there are extra traces generated during the evaluation, for example, custom scorer
code might generate traces. This function cleans up those noisy traces.
Args:
traces: List of traces to clean up.
eval_start_time: The start time of the evaluation run.
Returns:
List of traces that are kept after cleaning up extra traces.
"""
from mlflow.tracking.fluent import _get_experiment_id
try:
extra_trace_ids = [
trace.info.trace_id
for trace in traces
if not _should_keep_trace(trace, eval_start_time)
]
if extra_trace_ids:
_logger.debug(
f"Found {len(extra_trace_ids)} extra traces generated during evaluation run. "
"Deleting them."
)
# Import MlflowClient locally to avoid issues with tracing-only SDK
from mlflow.tracking.client import MlflowClient
MlflowClient().delete_traces(
experiment_id=_get_experiment_id(), trace_ids=extra_trace_ids
)
for trace_id in extra_trace_ids:
IPythonTraceDisplayHandler.get_instance().traces_to_display.pop(trace_id, None)
else:
_logger.debug("No extra traces found during evaluation run.")
except Exception as e:
_logger.debug(
f"Failed to clean up extra traces generated during evaluation. The "
f"result page might not show the correct list of traces. Error: {e}"
)
def _should_keep_trace(trace: Trace, eval_start_time: int) -> bool:
# We should not delete traces that are generated before the evaluation run started.
if trace.info.timestamp_ms < eval_start_time:
return True
# If the scorer tracing is enabled, keep traces generated by scorers.
if (
MLFLOW_GENAI_EVAL_ENABLE_SCORER_TRACING.get()
and TraceTagKey.SOURCE_SCORER_NAME in trace.info.tags
):
return True
# Otherwise, only keep traces from the prediction function.
return TraceTagKey.EVAL_REQUEST_ID in trace.info.tags
def construct_eval_result_df(
run_id: str,
traces: list[Trace],
eval_results: list["EvalResult"],
) -> "pd.DataFrame | None":
"""
Construct a pandas DataFrame from the traces and eval results.
Args:
run_id: The MLflow run ID of the evaluation run.
traces: List of traces. Only TraceInfo is used here, and **spans are ignored&**.
The expected input to this function is the result of
`mlflow.search_traces(include_spans=False, return_type="list")`.
eval_results: List of eval results containing the full spans.
Returns:
A pandas DataFrame with the eval results.
"""
import pandas as pd
if not traces:
return None
try:
trace_id_to_info = {t.info.trace_id: t.info for t in traces}
traces = [
Trace(
info=trace_id_to_info[eval_result.eval_item.trace.info.trace_id],
data=eval_result.eval_item.trace.data,
)
for eval_result in eval_results
]
df = traces_to_df(traces)
# Add unpacked assessment columns. The result df should look like:
# [trace_id, score_1/value, score_2/value, trace, state, ...]
assessments = (
df["assessments"].apply(lambda x: _get_assessment_values(x, run_id)).apply(pd.Series)
)
trace_id_column = df.pop("trace_id")
return pd.concat([trace_id_column, assessments, df], axis=1)
except Exception as e:
_logger.debug(f"Failed to construct eval result DataFrame: {e}", exc_info=True)
def _get_assessment_values(assessments: list[dict[str, Any]], run_id: str) -> dict[str, Any]:
result = {}
for a in assessments:
if (
# Exclude feedbacks from other evaluation runs
(source_run_id := a.get("metadata", {}).get(AssessmentMetadataKey.SOURCE_RUN_ID))
and source_run_id != run_id
):
continue
if feedback := a.get("feedback"):
result[f"{a['assessment_name']}/value"] = feedback.get("value")
elif expectation := a.get("expectation"):
result[f"{a['assessment_name']}/value"] = expectation.get("value")
return result
def create_minimal_trace(eval_item: "EvalItem") -> Trace:
"""
Create a minimal trace object with a single span, based on given inputs/outputs.
If the eval_item has a source with session metadata (from a dataset created from traces),
the session metadata will be restored on the newly created trace. This enables session-level
scorers to identify which traces belong to the same session.
"""
from mlflow.pyfunc.context import Context, set_prediction_context
# Extract session metadata from source if available
session_metadata = {}
if eval_item.source and hasattr(eval_item.source, "source_data"):
source_data = eval_item.source.source_data
if session_id := source_data.get("session_id"):
session_metadata[TraceMetadataKey.TRACE_SESSION] = session_id
context = Context(request_id=eval_item.request_id, is_evaluate=True)
with set_prediction_context(context):
with mlflow.start_span(name="root_span", span_type=SpanType.CHAIN) as root_span:
root_span.set_inputs(eval_item.inputs)
root_span.set_outputs(eval_item.outputs)
# Set session metadata on the trace while it's still active
if session_metadata:
mlflow.update_current_trace(metadata=session_metadata)
return mlflow.get_trace(root_span.trace_id)
# MB: Caching on tracking URI level to avoid unnecessary checks for each trace.
@cached(cache={}, key=lambda **kwargs: kwargs["tracking_uri"])
def _does_store_support_trace_linking(*, tracking_uri: str, trace: Trace, run_id: str) -> bool:
# Databricks backend is guaranteed to support trace linking
if is_databricks_uri(tracking_uri):
return True
try:
MlflowClient(tracking_uri).link_traces_to_run([trace.info.trace_id], run_id=run_id)
return True
except Exception:
return False
def batch_link_traces_to_run(
run_id: str | None, eval_results: list["EvalResult"], max_batch_size: int = 100
) -> None:
"""
Batch link traces to a run to avoid rate limits.
Args:
run_id: The MLflow run ID to link traces to
eval_results: List of evaluation results containing traces
max_batch_size: Maximum number of traces to link per batch call
"""
trace_ids = [eval_result.eval_item.trace.info.trace_id for eval_result in eval_results]
# Batch the trace IDs to avoid overwhelming the MLflow backend
for i in range(0, len(trace_ids), max_batch_size):
batch = trace_ids[i : i + max_batch_size]
try:
MlflowClient().link_traces_to_run(run_id=run_id, trace_ids=batch)
except Exception as e:
# FileStore doesn't support trace linking, so we skip it
if "Linking traces to runs is not supported in FileStore." in str(e):
return
_logger.warning(f"Failed to link batch of traces to run: {e}")
| NoOpTracerPatcher |
python | gevent__gevent | src/gevent/tests/test__socket_dns.py | {
"start": 27472,
"end": 29207
} | class ____(TestCase):
def _test_getaddrinfo(self, *args):
self._test('getaddrinfo', *args)
def test_80(self):
self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 80)
def test_int_string(self):
self._test_getaddrinfo(TestGeventOrg.HOSTNAME, '80')
def test_0(self):
self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 0)
def test_http(self):
self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 'http')
def test_notexistent_tld(self):
self._test_getaddrinfo('myhost.mytld', 53)
def test_notexistent_dot_com(self):
self._test_getaddrinfo('sdfsdfgu5e66098032453245wfdggd.com', 80)
def test1(self):
return self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 52, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, 0)
def test2(self):
return self._test_getaddrinfo(TestGeventOrg.HOSTNAME, 53, socket.AF_INET, socket.SOCK_DGRAM, 17)
@unittest.skipIf(RESOLVER_DNSPYTHON,
"dnspython only returns some of the possibilities")
def test3(self):
return self._test_getaddrinfo('google.com', 'http', socket.AF_INET6)
@greentest.skipIf(PY2, "Enums only on Python 3.4+")
def test_enums(self):
# https://github.com/gevent/gevent/issues/1310
# On Python 3, getaddrinfo does special things to make sure that
# the fancy enums are returned.
gai = gevent_socket.getaddrinfo('example.com', 80,
socket.AF_INET,
socket.SOCK_STREAM, socket.IPPROTO_TCP)
af, socktype, _proto, _canonname, _sa = gai[0]
self.assertIs(socktype, socket.SOCK_STREAM)
self.assertIs(af, socket.AF_INET)
| Test_getaddrinfo |
python | tensorflow__tensorflow | tensorflow/python/distribute/v1/all_reduce_test.py | {
"start": 1286,
"end": 10503
} | class ____(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testFlattenTensorsShapesDefined(self):
x = array_ops.placeholder(types_pb2.DT_FLOAT, [None])
with self.assertRaisesRegex(ValueError, "must have statically known shape"):
ar._flatten_tensors([x, x])
def testRingPermutations(self):
# 0 devices
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 0, [])
self.assertEqual(pred_by_c_d, [])
self.assertEqual(rank_by_c_d, [])
# 1 worker, 1 subchunk cases
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 1, [0])
self.assertEqual(pred_by_c_d, [[0]])
self.assertEqual(rank_by_c_d, [[0]])
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 1, [0, 1, 2])
self.assertEqual(pred_by_c_d, [[2, 0, 1]])
self.assertEqual(rank_by_c_d, [[0, 1, 2]])
# multiple workers, 1 subchunk cases
pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 1, [0, 1, 2])
self.assertEqual(pred_by_c_d, [[5, 0, 1, 2, 3, 4]])
self.assertEqual(rank_by_c_d, [[0, 1, 2, 3, 4, 5]])
pred_by_c_d, rank_by_c_d = ar._ring_permutations(3, 1, [0, 1, 2])
self.assertEqual(pred_by_c_d, [[8, 0, 1, 2, 3, 4, 5, 6, 7]])
self.assertEqual(rank_by_c_d, [[0, 1, 2, 3, 4, 5, 6, 7, 8]])
pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 1, [2, 1, 0])
self.assertEqual(pred_by_c_d, [[1, 2, 3, 4, 5, 0]])
self.assertEqual(rank_by_c_d, [[2, 1, 0, 5, 4, 3]])
# 1 worker, multiple subchunk cases
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 2, [0, 1, 2, 3])
self.assertEqual(pred_by_c_d, [[3, 0, 1, 2], [3, 0, 1, 2]])
self.assertEqual(rank_by_c_d, [[0, 1, 2, 3], [2, 3, 0, 1]])
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 4, [0, 1, 2, 3])
self.assertEqual(pred_by_c_d, [[3, 0, 1, 2], [3, 0, 1, 2],
[3, 0, 1, 2], [3, 0, 1, 2]])
self.assertEqual(rank_by_c_d, [[0, 1, 2, 3], [3, 0, 1, 2],
[2, 3, 0, 1], [1, 2, 3, 0]])
# multiple worker, multiple subchunk cases
pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 2, [0, 1, 2, 3])
self.assertEqual(pred_by_c_d, [[7, 0, 1, 2, 3, 4, 5, 6],
[3, 0, 5, 2, 7, 4, 1, 6]])
self.assertEqual(rank_by_c_d, [[0, 1, 2, 3, 4, 5, 6, 7],
[2, 3, 0, 1, 6, 7, 4, 5]])
pred_by_c_d, rank_by_c_d = ar._ring_permutations(2, 2, [0, 3, 2, 1])
self.assertEqual(pred_by_c_d, [[5, 2, 3, 0, 1, 6, 7, 4],
[1, 2, 7, 0, 5, 6, 3, 4]])
self.assertEqual(rank_by_c_d, [[0, 3, 2, 1, 4, 7, 6, 5],
[2, 1, 0, 3, 6, 5, 4, 7]])
def _buildInput(self, num_workers, num_gpus):
t8 = constant_op.constant(
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
types_pb2.DT_FLOAT)
input_tensors = []
device_names = []
for w in range(0, num_workers):
for d in range(0, num_gpus):
dn = "/replica:0/task:%d/device:GPU:%d" % (w, d % num_gpus)
device_names.append(dn)
with ops.device(dn):
input_tensors.append(array_ops.identity(t8))
return input_tensors, device_names
@test_util.run_deprecated_v1
def testBuildRingGatherPassStructure(self):
# 1 worker, 1 device
input_tensors, device_names = self._buildInput(1, 1)
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 1, [0])
output_tensors = ar._build_ring_gather(input_tensors, device_names, 1,
pred_by_c_d, rank_by_c_d,
math_ops.add)
self.assertEqual(output_tensors, input_tensors)
# 1 worker, 4 devices, 2 subchunks
input_tensors, device_names = self._buildInput(1, 4)
pred_by_c_d, rank_by_c_d = ar._ring_permutations(1, 2, [0, 1, 2, 3])
output_tensors, pad_len = ar._build_ring_gather(
input_tensors, device_names, 2, pred_by_c_d, rank_by_c_d, math_ops.add)
self.assertEqual(0, pad_len)
# same number outputs as inputs
self.assertEqual(len(output_tensors), len(input_tensors))
num_chunks = 2 * len(input_tensors)
tlen = tensor_shape.dimension_value(input_tensors[0].shape[0])
for otl in output_tensors:
self.assertEqual(len(otl), num_chunks)
for ot in otl:
self.assertEqual(ot.shape, [tlen//num_chunks])
def _buildInitialVars(self, shape, dev_list):
values = []
num_devices = len(dev_list)
dim = np.prod(shape, dtype=int) if shape else 1
for d in range(0, num_devices):
with ops.device(dev_list[d]):
npt = np.zeros(shape).astype(np.float32)
alias = np.frombuffer(npt.data, dtype=np.float32)
for i in range(0, dim):
alias[i] = i + 0.01 * d
var = state_ops.variable_op(shape, types_pb2.DT_FLOAT)
state_ops.init_variable(var, npt).op.run()
values.append(var)
return values
# pylint: disable=g-long-lambda
def _buildRing(self, num_workers, num_gpus, subdiv):
gpu_perm = range(0, num_gpus)
return lambda x, un_op: ar.build_ring_all_reduce(
x, num_workers, subdiv, gpu_perm, math_ops.add, un_op)
def _testAllReduce(self, num_workers, num_gpus, shape, build_f):
# Use local CPU as device for all inputs.
num_devices = num_workers * num_gpus
dev_list = ["/replica:0/task:0/device:CPU:0"
for _ in range(num_devices)]
with self.cached_session():
input_tensors = self._buildInitialVars(shape, dev_list)
un_op = lambda x: math_ops.div(
x, constant_op.constant(num_devices, dtype=types_pb2.DT_FLOAT))
simple_sum = math_ops.add_n(input_tensors)
simple_sum.op.run()
output_tensors = build_f(input_tensors, un_op)
sum_reduced = math_ops.add_n(output_tensors)
sum_reduced.op.run()
self.assertAllClose(sum_reduced, self.evaluate(simple_sum))
def _testRingAllReduce(self, num_workers, num_gpus, shape, subdiv):
start_time = time.time()
build_f = self._buildRing(num_workers, num_gpus, subdiv)
self._testAllReduce(num_workers, num_gpus, shape, build_f)
elapsed = time.time() - start_time
tf_logging.info("RingAllReduce num_workers=%d num_gpus=%d shape=%s "
"subdiv=%d elapsed=%f" %
(num_workers, num_gpus, shape, subdiv, elapsed))
@test_util.run_deprecated_v1
def testRingAllReduce(self):
self._testRingAllReduce(1, 2, [], 1)
self._testRingAllReduce(1, 2, [8], 1)
self._testRingAllReduce(1, 2, [4, 4], 1)
self._testRingAllReduce(6, 1, [8], 1)
self._testRingAllReduce(1, 8, [32], 1)
self._testRingAllReduce(1, 8, [120], 1)
self._testRingAllReduce(2, 8, [7, 13], 1)
self._testRingAllReduce(2, 8, [8, 8], 2)
self._testRingAllReduce(2, 8, [8, 8], 4)
# TODO(tucker): The following test is surprisingly slow.
# Diagnose and fix before re-enabling.
# self._testRingAllReduce(4, 8, [8, 8, 2], 4)
def _buildShuffle(self, num_workers, num_gpus, num_shards):
# Use local CPU for all shuffle shards
gather_devices = ["/replica:0/task:0/device:CPU:0"
for _ in range(num_shards)]
return lambda x, un_op: ar.build_shuffle_all_reduce(
x, gather_devices, math_ops.add_n, un_op)
def _testShuffleAllReduce(self, num_workers, num_gpus, shape, num_shards):
start_time = time.time()
build_f = self._buildShuffle(num_workers, num_gpus, num_shards)
self._testAllReduce(num_workers, num_gpus, shape, build_f)
elapsed = time.time() - start_time
tf_logging.info("ShuffleAllReduce num_workers=%d num_gpus=%d shape=%s "
"elapsed=%f" % (num_workers, num_gpus, shape, elapsed))
@test_util.run_deprecated_v1
def testShuffleAllReduce(self):
self._testShuffleAllReduce(1, 2, [], 1)
self._testShuffleAllReduce(1, 2, [8], 1)
self._testShuffleAllReduce(1, 2, [4, 4], 1)
self._testShuffleAllReduce(1, 8, [32], 1)
self._testShuffleAllReduce(1, 8, [120], 1)
self._testShuffleAllReduce(2, 8, [7, 13], 3)
self._testShuffleAllReduce(2, 8, [8, 8], 2)
self._testShuffleAllReduce(2, 8, [8, 8], 4)
self._testShuffleAllReduce(4, 8, [8, 8, 2], 4)
def _buildRecursiveHD(self, num_workers, num_gpus):
return lambda x, un_op: ar.build_recursive_hd_all_reduce(
x, math_ops.add, un_op)
# pylint: enable=g-long-lambda
def _testRecursiveHDAllReduce(self, num_workers, num_gpus, shape):
start_time = time.time()
build_f = self._buildRecursiveHD(num_workers, num_gpus)
self._testAllReduce(num_workers, num_gpus, shape, build_f)
elapsed = time.time() - start_time
tf_logging.info("RecursiveHDAllReduce num_workers=%d num_gpus=%d "
"shape=%s elapsed=%f" %
(num_workers, num_gpus, shape, elapsed))
@test_util.run_deprecated_v1
def testRecursiveHDAllReduce(self):
self._testRecursiveHDAllReduce(1, 2, [8])
self._testRecursiveHDAllReduce(1, 2, [4, 4])
self._testRecursiveHDAllReduce(1, 8, [32])
self._testRecursiveHDAllReduce(1, 8, [120])
self._testRecursiveHDAllReduce(2, 8, [8, 8])
self._testRecursiveHDAllReduce(4, 8, [8, 8, 2])
if __name__ == "__main__":
test.main()
| AllReduceTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclass2.py | {
"start": 356,
"end": 502
} | class ____:
str_to_int: Callable[[str], int] = f
c = C()
reveal_type(c.str_to_int, expected_text="(str) -> int")
c.str_to_int = decorate(f)
| C |
python | doocs__leetcode | solution/0700-0799/0743.Network Delay Time/Solution2.py | {
"start": 0,
"end": 591
} | class ____:
def networkDelayTime(self, times: List[List[int]], n: int, k: int) -> int:
g = [[] for _ in range(n)]
for u, v, w in times:
g[u - 1].append((v - 1, w))
dist = [inf] * n
dist[k - 1] = 0
pq = [(0, k - 1)]
while pq:
d, u = heappop(pq)
if d > dist[u]:
continue
for v, w in g[u]:
if (nd := d + w) < dist[v]:
dist[v] = nd
heappush(pq, (nd, v))
ans = max(dist)
return -1 if ans == inf else ans
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-pgvector/destination_pgvector/common/catalog/catalog_providers.py | {
"start": 676,
"end": 3619
} | class ____:
"""A catalog provider wraps a configured catalog and configured streams.
This class is responsible for providing information about the catalog and streams.
Note:
- The catalog provider is not responsible for managing the catalog or streams but it may
be updated with new streams as they are discovered.
"""
def __init__(
self,
configured_catalog: ConfiguredAirbyteCatalog,
) -> None:
"""Initialize the catalog manager with a catalog object reference.
Since the catalog is passed by reference, the catalog manager may be updated with new
streams as they are discovered.
"""
self._catalog: ConfiguredAirbyteCatalog = configured_catalog
@property
def configured_catalog(self) -> ConfiguredAirbyteCatalog:
return self._catalog
@property
def stream_names(self) -> list[str]:
return list({stream.stream.name for stream in self.configured_catalog.streams})
def get_configured_stream_info(
self,
stream_name: str,
) -> ConfiguredAirbyteStream:
"""Return the column definitions for the given stream."""
if not self.configured_catalog:
raise exc.PyAirbyteInternalError(
message="Cannot get stream JSON schema without a catalog.",
)
matching_streams: list[ConfiguredAirbyteStream] = [
stream
for stream in self.configured_catalog.streams
if stream.stream.name == stream_name
]
if not matching_streams:
raise exc.AirbyteStreamNotFoundError(
stream_name=stream_name,
context={
"available_streams": [
stream.stream.name for stream in self.configured_catalog.streams
],
},
)
if len(matching_streams) > 1:
raise exc.PyAirbyteInternalError(
message="Multiple streams found with same name.",
context={
"stream_name": stream_name,
},
)
return matching_streams[0]
@final
def get_stream_json_schema(
self,
stream_name: str,
) -> dict[str, Any]:
"""Return the column definitions for the given stream."""
return self.get_configured_stream_info(stream_name).stream.json_schema
def get_stream_properties(
self,
stream_name: str,
) -> dict[str, dict]:
"""Return the names of the top-level properties for the given stream."""
return self.get_stream_json_schema(stream_name)["properties"]
def get_destination_sync_mode(
self,
stream_name: str,
) -> DestinationSyncMode:
"""Return the destination sync mode for the given stream."""
return self.get_configured_stream_info(stream_name).destination_sync_mode
| CatalogProvider |
python | kamyu104__LeetCode-Solutions | Python/minimum-sum-of-four-digit-number-after-splitting-digits.py | {
"start": 79,
"end": 1141
} | class ____(object):
def minimumSum(self, num):
"""
:type num: int
:rtype: int
"""
def inplace_counting_sort(nums, reverse=False): # Time: O(n)
count = [0]*(max(nums)+1)
for num in nums:
count[num] += 1
for i in xrange(1, len(count)):
count[i] += count[i-1]
for i in reversed(xrange(len(nums))): # inplace but unstable sort
while nums[i] >= 0:
count[nums[i]] -= 1
j = count[nums[i]]
nums[i], nums[j] = nums[j], ~nums[i]
for i in xrange(len(nums)):
nums[i] = ~nums[i] # restore values
if reverse: # unstable sort
nums.reverse()
nums = map(int, list(str(num)))
inplace_counting_sort(nums)
a = b = 0
for x in nums:
a = a*10+x
a, b = b, a
return a+b
# Time: O(dlogd) = O(1), d is the number of digits
# Space: O(d) = O(1)
# greedy
| Solution |
python | walkccc__LeetCode | solutions/2137. Pour Water Between Buckets to Make Water Levels Equal/2137.py | {
"start": 0,
"end": 540
} | class ____:
def equalizeWater(self, buckets: list[int], loss: int) -> float:
ERR = 1e-5
PERCENTAGE = (100 - loss) / 100
l = 0.0
r = max(buckets)
def canFill(target: float) -> bool:
extra = 0
need = 0
for bucket in buckets:
if bucket > target:
extra += bucket - target
else:
need += target - bucket
return extra * PERCENTAGE >= need
while r - l > ERR:
m = (l + r) / 2
if canFill(m):
l = m
else:
r = m
return l
| Solution |
python | pytorch__pytorch | torch/_inductor/codegen/memory_planning.py | {
"start": 16668,
"end": 18331
} | class ____:
"""
Due to inplace reuse an allocated buffer can have many names.
This tracks these collections of buffers sharing underlying memory.
"""
def __init__(self, node: BufferLike):
self.node = node
self.names = [node.get_name()]
self.is_output = False
self.allocation: Optional[Allocation] = None
self.live_range = LiveRange(float("inf"), -float("inf"))
def update_usage(self, timestep: int):
"""Expand self.live_range to include timestep"""
self.live_range = LiveRange(
min(timestep, self.live_range.begin),
max(timestep, self.live_range.end),
)
def sym_nbytes(self):
return self.node.get_layout().storage_size() * self.node.get_dtype().itemsize
def make_allocation(self):
assert not self.allocation, "multiple allocations"
assert isinstance(self.live_range.begin, int), "live ranges not computed"
nbytes = self.sym_nbytes()
# For now, fallback value will be used if we encounter an unbacked SymInt. The longer-term plan is to have
# size_hint() use better heuristics for unbackeds, at which point the fallback value will be ignored.
size_hint = V.graph.sizevars.size_hint(nbytes, fallback=64)
self.allocation = Allocation(
self.node,
self.live_range,
size_hint=size_hint,
symbolic_size=nbytes,
)
def __repr__(self):
return (
f"{self.__class__.__name__}({self.names!r}, is_output={self.is_output}, "
f"live_range={self.live_range}"
)
@dataclasses.dataclass
| BufferGroup |
python | huggingface__transformers | tests/models/jamba/test_modeling_jamba.py | {
"start": 3711,
"end": 12008
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
attn_layer_offset=1,
attn_layer_period=8,
num_attention_heads=2,
num_key_value_heads=2,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.attn_layer_offset = attn_layer_offset
self.attn_layer_period = attn_layer_period
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return JambaConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
attn_layer_offset=self.attn_layer_offset,
attn_layer_period=self.attn_layer_period,
num_attention_heads=self.num_attention_heads,
num_key_value_heads=self.num_key_value_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=True,
initializer_range=self.initializer_range,
use_mamba_kernels=False,
num_experts=2,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
model = JambaModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_causal_lm(
self,
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
):
model = JambaForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=token_labels)
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids, labels=token_labels)
result = model(input_ids)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_decoder_model_past_large_inputs(
self,
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
):
config.is_decoder = True
config.add_cross_attention = True
model = JambaForCausalLM(config=config)
model.to(torch_device)
model.eval()
# first forward pass
# Attention: Jamba needs the cache to be initialized to return a cache!
past_key_values = HybridMambaAttentionDynamicCache(
config, input_ids.shape[0], model.dtype, device=model.device
)
outputs = model(
input_ids,
attention_mask=input_mask,
past_key_values=past_key_values,
use_cache=True,
)
past_key_values = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(
next_input_ids,
attention_mask=next_attention_mask,
output_hidden_states=True,
)["hidden_states"][0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
cache_position=torch.arange(
input_ids.shape[1], input_ids.shape[1] + next_tokens.shape[1], device=model.device
),
)["hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_for_sequence_classification(
self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = JambaForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
| JambaModelTester |
python | pytorch__pytorch | test/test_out_dtype_op.py | {
"start": 681,
"end": 9395
} | class ____(TestCase):
def test_out_dtype_make_fx(self):
class M(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = weight
def forward(self, x):
return out_dtype(
torch.ops.aten.mm.default, torch.int32, x, self.weight
)
weight = torch.randint(-128, 127, (5, 5), dtype=torch.int8)
m = M(weight)
x = torch.randint(-128, 127, (5, 5), dtype=torch.int8)
gm = make_fx(m)(x)
self.assertTrue(torch.allclose(m(x), gm(x)))
gm = make_fx(torch.func.functionalize(M(weight)))(x)
self.assertTrue(torch.allclose(m(x), gm(x)))
FileCheck().check("torch.ops.higher_order.out_dtype").check("aten.mm.default").run(gm.code)
self.assertTrue(torch.allclose(m(x), gm(x)))
for node in gm.graph.nodes:
if node.op == "call_function" and node.target is out_dtype:
# Result of this node should be int32
self.assertTrue(node.meta["val"].dtype, torch.int32)
# Argument of this node should be int8
self.assertTrue(node.args[2].meta["val"].dtype, torch.int8)
def test_out_dtype_op_functional(self):
class M(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = weight
def forward(self, x):
return out_dtype(
torch.ops.aten.mm.default, torch.int32, x, self.weight
)
weight = torch.randint(-128, 127, (5, 5), dtype=torch.int8)
m = M(weight)
x = torch.randint(-128, 127, (5, 5), dtype=torch.int8)
ep = torch.export.export(m, (x,), strict=True)
FileCheck().check("torch.ops.higher_order.out_dtype").check(
"aten.mm.default"
).run(ep.graph_module.code)
self.assertTrue(torch.allclose(m(x), ep.module()(x)))
for node in ep.graph.nodes:
if node.op == "call_function" and node.target is out_dtype:
# Result of this node should be int32
self.assertTrue(node.meta["val"].dtype, torch.int32)
# Argument of this node should be int8
self.assertTrue(node.args[2].meta["val"].dtype, torch.int8)
def test_out_dtype_mm_numerical(self):
class M(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = weight
def forward(self, x):
return out_dtype(
torch.ops.aten.mm.default, torch.int32, x, self.weight
)
weight = torch.randint(-128, 127, (5, 5), dtype=torch.int8)
m = M(weight)
x = torch.randint(-128, 127, (5, 5), dtype=torch.int8)
gm = make_fx(m)(x)
x_casted = x.to(torch.int32)
weight_casted = weight.to(torch.int32)
numerical_res = torch.ops.aten.mm.default(x_casted, weight_casted)
self.assertTrue(torch.allclose(numerical_res, gm(x)))
def test_out_dtype_dynamo(self):
def f(x, y):
return out_dtype(
torch.ops.aten.mul.Scalar, torch.int32, x, y
)
inp = (torch.randint(-128, 127, (5, 5), dtype=torch.int8), 3.0)
compiled = torch.compile(f, backend="eager", fullgraph=True)
self.assertTrue(torch.allclose(f(*inp), compiled(*inp)))
def test_out_dtype_mul_scalar_numerical(self):
def f(x, y):
return out_dtype(
torch.ops.aten.mul.Scalar, torch.int32, x, y
)
inp = (torch.randint(-128, 127, (5, 5), dtype=torch.int8), 3.0)
gm = make_fx(f)(*inp)
numerical_res = torch.ops.aten.mul.Scalar(inp[0].to(dtype=torch.int32), 3)
self.assertTrue(torch.allclose(numerical_res, gm(*inp)))
def test_out_dtype_non_functional(self):
class M(torch.nn.Module):
def forward(self, x, y):
return out_dtype(
torch.ops.aten.add_.Tensor, torch.int32, x, y
)
with self.assertRaisesRegex(ValueError, "out_dtype's first argument needs to be a functional operator"):
_ = torch.export.export(
M(),
(
torch.randint(-128, 127, (5, 5), dtype=torch.int8),
torch.randint(-128, 127, (5, 5), dtype=torch.int8),
),
strict=True,
)
def test_out_dtype_non_op_overload(self):
def f(x, y):
return out_dtype(
torch.add, torch.int32, x, y
)
with self.assertRaisesRegex(ValueError, "out_dtype's first argument must be an OpOverload"):
f(torch.randint(-128, 127, (5, 5), dtype=torch.int8), torch.randint(-128, 127, (5, 5), dtype=torch.int8))
def test_out_dtype_no_autograd(self):
def f(x, y):
return out_dtype(
torch.ops.aten.mm.default, torch.int32, x, y
)
inp = (torch.randn(5, 5, requires_grad=True), torch.randn(5, 5, requires_grad=True))
# error is delayed
f(*inp)
with torch.no_grad():
f(*inp)
with self.assertRaisesRegex(RuntimeError, "does not require grad and does not have a grad_fn"):
out = f(*inp)
loss = out - torch.ones(out.shape)
loss.backward()
@unittest.skipIf(IS_WINDOWS, "_int_mm unavailable")
@unittest.skipIf(TEST_WITH_ROCM, "_int_mm unavailable")
@unittest.skipIf(not SM80OrLater, "_int_mm unavailable")
@unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
@unittest.skipIf(_get_torch_cuda_version() >= (11, 7), "_int_mm unavailable")
@unittest.skipIf(not TEST_CUDA, "_int_mm unavailable")
@skipIfNoDynamoSupport
def test_out_dtype_inductor_decomp(self) -> None:
def func(x, w):
return out_dtype(torch.ops.aten.mm.default, torch.int32, x, w)
w = torch.randint(-128, 127, (32, 32), dtype=torch.int8, device="cuda")
x = torch.randint(-128, 127, (32, 32), dtype=torch.int8, device="cuda")
ref = torch._int_mm(x, w)
test_out = func(x, w)
func_comp = torch.compile(func, fullgraph=True, mode="max-autotune")
test_out_c = func_comp(x, w)
self.assertTrue(torch.allclose(ref, test_out))
self.assertTrue(torch.allclose(ref, test_out_c))
@unittest.skipIf(not TEST_CUDA, "cuda only")
def test_out_dtype_inductor_decomp_trace(self) -> None:
def func(x, w):
return out_dtype(torch.ops.aten.mm.default, torch.int32, x, w)
w = torch.randint(-128, 127, (32, 32), dtype=torch.int8, device="cuda")
x = torch.randint(-128, 127, (32, 32), dtype=torch.int8, device="cuda")
# Check that make_fx with inductor decomps produces _int_mm
decomp_table = torch._inductor.decomposition.select_decomp_table()
gm = make_fx(func, decomp_table, tracing_mode="symbolic")(x, w)
self.assertExpectedInline(gm.code.strip(), """\
def forward(self, x_1, w_1):
_int_mm = torch.ops.aten._int_mm.default(x_1, w_1); x_1 = w_1 = None
return _int_mm""")
@unittest.skipIf(not TEST_CUDA, "cuda only")
def test_out_dtype_int_mm_default_trace(self) -> None:
def func(x, w):
return out_dtype(torch.ops.aten.mm.default, torch.int32, x, w)
w = torch.randint(-128, 127, (32, 32), dtype=torch.int8, device="cuda")
x = torch.randint(-128, 127, (32, 32), dtype=torch.int8, device="cuda")
# By default, out_dtype is preserved in the trace
gm = make_fx(func, tracing_mode="symbolic")(x, w)
self.assertExpectedInline(gm.code.strip(), """\
def forward(self, x_1, w_1):
out_dtype = torch.ops.higher_order.out_dtype(torch.ops.aten.mm.default, torch.int32, x_1, w_1); x_1 = w_1 = None
return out_dtype""")
def test_out_dtype_wrong_output(self) -> None:
def multiple_out(x):
return out_dtype(
torch.ops.aten.topk.default, torch.int32, x, 5
)
inp = (torch.randn(10),)
with self.assertRaisesRegex(ValueError, "out_dtype's can only apply to ops that return a single tensor"):
multiple_out(*inp)
def singleton_list_out(x):
return out_dtype(
torch.ops.aten.split_copy.Tensor, torch.int32, x, 10
)
with self.assertRaisesRegex(ValueError, "out_dtype's can only apply to ops that return a single tensor"):
singleton_list_out(*inp)
if __name__ == '__main__':
run_tests()
| TestOutDtypeOp |
python | ray-project__ray | rllib/models/preprocessors.py | {
"start": 5599,
"end": 5973
} | class ____(Preprocessor):
@override(Preprocessor)
def _init_shape(self, obs_space: gym.Space, options: dict) -> List[int]:
return (128,)
@override(Preprocessor)
def transform(self, observation: TensorType) -> np.ndarray:
self.check_shape(observation)
return (observation.astype("float32") - 128) / 128
@OldAPIStack
| AtariRamPreprocessor |
python | PrefectHQ__prefect | src/prefect/client/schemas/actions.py | {
"start": 20865,
"end": 21207
} | class ____(ActionBaseModel):
"""Data used by the Prefect REST API to create a saved search."""
name: str = Field(default=..., description="The name of the saved search.")
filters: list[objects.SavedSearchFilter] = Field(
default_factory=lambda: [], description="The filter set for the saved search."
)
| SavedSearchCreate |
python | pyparsing__pyparsing | examples/tiny/tiny_ast.py | {
"start": 13379,
"end": 15195
} | class ____(TinyNode):
"""Repeat-Until loop node (do-while semantics).
Executes the body statements at least once, then evaluates the `cond`
expression after each iteration, terminating when it evaluates to true.
"""
statement_type: ClassVar[str] = "repeat_stmt"
# Body statements for the repeat block
statements: list[TinyNode] = field(default_factory=list)
# Until condition expression evaluated after each iteration
cond: object | None = None
@classmethod
def from_parsed(cls, parsed: pp.ParseResults) -> RepeatStmtNode:
# Build child statement nodes from the parsed body sequence
statement_nodes: list[TinyNode] = []
for stmt in parsed.body:
if isinstance(stmt, pp.ParseResults) and "type" in stmt:
node_cls = TinyNode.from_statement_type(stmt["type"]) # type: ignore[index]
if node_cls is not None:
statement_nodes.append(node_cls.from_parsed(stmt)) # type: ignore[arg-type]
# Condition is mandatory in the parser's definition of repeat-until
cond_expr = parsed.cond
return cls(statements=statement_nodes, cond=cond_expr)
def execute(self, engine: "TinyEngine") -> object | None: # noqa: F821 - forward ref
# Repeat-Until is a do-while: execute body, then check condition; stop when condition is true
while True:
for node in self.statements:
# Return statements now propagate via exception; no need to inspect results
node.execute(engine)
# Evaluate loop condition after executing the body
cond_val = engine.eval_expr(self.cond) if self.cond is not None else False
if bool(cond_val):
break
return None
@dataclass
| RepeatStmtNode |
python | walkccc__LeetCode | solutions/42. Trapping Rain Water/42-2.py | {
"start": 0,
"end": 417
} | class ____:
def trap(self, height: list[int]) -> int:
if not height:
return 0
ans = 0
l = 0
r = len(height) - 1
maxL = height[l]
maxR = height[r]
while l < r:
if maxL < maxR:
ans += maxL - height[l]
l += 1
maxL = max(maxL, height[l])
else:
ans += maxR - height[r]
r -= 1
maxR = max(maxR, height[r])
return ans
| Solution |
python | readthedocs__readthedocs.org | readthedocs/builds/managers.py | {
"start": 699,
"end": 2813
} | class ____(models.Manager):
"""
Version manager for manager only queries.
For queries not suitable for the :py:class:`VersionQuerySet`, such as create
queries.
"""
@classmethod
def from_queryset(cls, queryset_class, class_name=None):
# This is overridden because :py:meth:`models.Manager.from_queryset`
# uses `inspect` to retrieve the class methods, and the proxy class has
# no direct members.
queryset_class = get_override_class(
VersionQuerySet,
VersionQuerySet._default_class,
)
return super().from_queryset(queryset_class, class_name)
def create_stable(self, **kwargs):
defaults = {
"slug": STABLE,
"verbose_name": STABLE_VERBOSE_NAME,
"machine": True,
"active": True,
# TODO: double-check if we still require the `identifier: STABLE` field.
# At the time of creation, we don't really know what's the branch/tag identifier
# for the STABLE version. It makes sense to be `None`, probably.
#
# Note that we removed the `identifier: LATEST` from `create_latest` as a way to
# use the default branch.
"identifier": STABLE,
"type": TAG,
}
defaults.update(kwargs)
return self.create(**defaults)
def create_latest(self, **kwargs):
defaults = {
"slug": LATEST,
"verbose_name": LATEST_VERBOSE_NAME,
"machine": True,
"active": True,
"type": BRANCH,
}
defaults.update(kwargs)
return self.create(**defaults)
def get_object_or_log(self, **kwargs):
"""
Returns Version object or log.
It will return the Version object if found for the given kwargs,
otherwise it will log a warning along with all provided kwargs.
"""
try:
return super().get(**kwargs)
except ObjectDoesNotExist:
log.warning("Version not found for given kwargs.", kwargs=kwargs)
| VersionManager |
python | bokeh__bokeh | src/bokeh/core/property/dataspec.py | {
"start": 11067,
"end": 12291
} | class ____(DataSpec):
""" A |DataSpec| property that accepts font-size fixed values.
The ``FontSizeSpec`` property attempts to first interpret string values as
font sizes (i.e. valid CSS length values). Otherwise, string values are
interpreted as field names. For example:
.. code-block:: python
m.font_size = "13px" # value
m.font_size = "1.5em" # value
m.font_size = "foo" # field
A full list of all valid CSS length units can be found here:
https://drafts.csswg.org/css-values/#lengths
"""
def __init__(self, default, *, help: str | None = None) -> None:
super().__init__(FontSize, default=default, help=help)
def validate(self, value: Any, detail: bool = True) -> None:
# We want to preserve existing semantics and be a little more restrictive. This
# validations makes m.font_size = "" or m.font_size = "6" an error
super().validate(value, detail)
if isinstance(value, str):
if len(value) == 0 or (value[0].isdigit() and not CSS_LENGTH_RE.match(value)):
msg = "" if not detail else f"{value!r} is not a valid font size value"
raise ValueError(msg)
| FontSizeSpec |
python | spack__spack | lib/spack/spack/vendor/macholib/mach_o.py | {
"start": 43486,
"end": 43697
} | class ____(Structure):
_fields_ = (
("cputype", cpu_type_t),
("cpusubtype", cpu_subtype_t),
("offset", p_uint32),
("size", p_uint32),
("align", p_uint32),
)
| fat_arch |
python | scipy__scipy | benchmarks/benchmarks/stats.py | {
"start": 23422,
"end": 23791
} | class ____(Benchmark):
param_names = ['method']
params = [
["CD", "WD", "MD", "L2-star",]
]
def setup(self, method):
rng = np.random.default_rng(1234)
sample = rng.random((1000, 10))
self.sample = sample
def time_discrepancy(self, method):
stats.qmc.discrepancy(self.sample, method=method)
| BenchQMCDiscrepancy |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_circulant_test.py | {
"start": 15846,
"end": 23285
} | class ____(
LinearOperatorCirculantBaseTest,
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Test of LinearOperatorCirculant when the spectrum is not Hermitian.
Non-Hermitian spectrum <==> Complex valued operator.
We test only complex dtypes here.
"""
@staticmethod
def dtypes_to_test():
return [dtypes.complex64, dtypes.complex128]
# Skip Cholesky since we are explicitly testing non-hermitian
# spectra.
@staticmethod
def skip_these_tests():
return ["cholesky", "eigvalsh"]
@staticmethod
def optional_tests():
"""List of optional test names to run."""
return [
"operator_matmul_with_same_type",
"operator_solve_with_same_type",
]
def operator_and_matrix(self,
shape_info,
dtype,
use_placeholder,
ensure_self_adjoint_and_pd=False):
del ensure_self_adjoint_and_pd
shape = shape_info.shape
# Will be well conditioned enough to get accurate solves.
spectrum = linear_operator_test_util.random_sign_uniform(
shape=self._shape_to_spectrum_shape(shape),
dtype=dtype,
minval=1.,
maxval=2.)
lin_op_spectrum = spectrum
if use_placeholder:
lin_op_spectrum = array_ops.placeholder_with_default(spectrum, shape=None)
operator = linalg.LinearOperatorCirculant(
lin_op_spectrum, input_output_dtype=dtype)
self.assertEqual(
operator.parameters,
{
"input_output_dtype": dtype,
"is_non_singular": None,
"is_positive_definite": None,
"is_self_adjoint": None,
"is_square": True,
"name": "LinearOperatorCirculant",
"spectrum": lin_op_spectrum,
})
mat = self._spectrum_to_circulant_1d(spectrum, shape, dtype=dtype)
return operator, mat
@test_util.disable_xla("No registered Const")
def test_simple_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
with self.cached_session():
spectrum = math_ops.cast([1. + 0j, 1j, -1j], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(
spectrum, input_output_dtype=dtypes.complex64)
matrix = operator.to_dense()
imag_matrix = math_ops.imag(matrix)
eps = np.finfo(np.float32).eps
np.testing.assert_allclose(
0, self.evaluate(imag_matrix), rtol=0, atol=eps * 3)
def test_adjoint_output(self):
spectrum = math_ops.cast([1. + 0j, 1j, -1j], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(
spectrum, input_output_dtype=dtypes.complex64)
with self.cached_session():
op_adjoint = operator.adjoint()
self.assertIsInstance(op_adjoint, linalg.LinearOperatorCirculant)
self.assertTrue(op_adjoint.spectrum.dtype.is_complex)
def test_inverse_output(self):
spectrum = math_ops.cast([1. + 0j, 1j, -1j], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(
spectrum, input_output_dtype=dtypes.complex64)
with self.cached_session():
op_inverse = operator.inverse()
self.assertIsInstance(op_inverse, linalg.LinearOperatorCirculant)
def test_simple_positive_real_spectrum_gives_self_adjoint_pos_def_oper(self):
with self.cached_session() as sess:
spectrum = math_ops.cast([6., 4, 2], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(
spectrum, input_output_dtype=dtypes.complex64)
matrix, matrix_h = sess.run(
[operator.to_dense(),
linalg.adjoint(operator.to_dense())])
self.assertAllClose(matrix, matrix_h)
self.evaluate(operator.assert_positive_definite()) # Should not fail
self.evaluate(operator.assert_self_adjoint()) # Should not fail
def test_defining_operator_using_real_convolution_kernel(self):
with self.cached_session():
convolution_kernel = [1., 2., 1.]
spectrum = fft_ops.fft(
math_ops.cast(convolution_kernel, dtypes.complex64))
# spectrum is shape [3] ==> operator is shape [3, 3]
# spectrum is Hermitian ==> operator is real.
operator = linalg.LinearOperatorCirculant(spectrum)
# Allow for complex output so we can make sure it has zero imag part.
self.assertEqual(operator.dtype, dtypes.complex64)
matrix = self.evaluate(operator.to_dense())
np.testing.assert_allclose(0, np.imag(matrix), atol=1e-6)
@test_util.run_v1_only("currently failing on v2")
def test_hermitian_spectrum_gives_operator_with_zero_imag_part(self):
with self.cached_session():
# Make spectrum the FFT of a real convolution kernel h. This ensures that
# spectrum is Hermitian.
h = linear_operator_test_util.random_normal(shape=(3, 4))
spectrum = fft_ops.fft(math_ops.cast(h, dtypes.complex64))
operator = linalg.LinearOperatorCirculant(
spectrum, input_output_dtype=dtypes.complex64)
matrix = operator.to_dense()
imag_matrix = math_ops.imag(matrix)
eps = np.finfo(np.float32).eps
np.testing.assert_allclose(
0, self.evaluate(imag_matrix), rtol=0, atol=eps * 3 * 4)
def test_convolution_kernel_same_as_first_row_of_to_dense(self):
spectrum = [[3., 2., 1.], [2., 1.5, 1.]]
with self.cached_session():
operator = linalg.LinearOperatorCirculant(spectrum)
h = operator.convolution_kernel()
c = operator.to_dense()
self.assertAllEqual((2, 3), h.shape)
self.assertAllEqual((2, 3, 3), c.shape)
self.assertAllClose(self.evaluate(h), self.evaluate(c)[:, :, 0])
def test_assert_non_singular_fails_for_singular_operator(self):
spectrum = math_ops.cast([0 + 0j, 4 + 0j, 2j + 2], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(spectrum)
with self.cached_session():
with self.assertRaisesOpError("Singular operator"):
self.evaluate(operator.assert_non_singular())
def test_assert_non_singular_does_not_fail_for_non_singular_operator(self):
spectrum = math_ops.cast([-3j, 4 + 0j, 2j + 2], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(spectrum)
with self.cached_session():
self.evaluate(operator.assert_non_singular()) # Should not fail
def test_assert_positive_definite_fails_for_non_positive_definite(self):
spectrum = math_ops.cast([6. + 0j, 4 + 0j, 2j], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(spectrum)
with self.cached_session():
with self.assertRaisesOpError("Not positive definite"):
self.evaluate(operator.assert_positive_definite())
def test_assert_positive_definite_does_not_fail_when_pos_def(self):
spectrum = math_ops.cast([6. + 0j, 4 + 0j, 2j + 2], dtypes.complex64)
operator = linalg.LinearOperatorCirculant(spectrum)
with self.cached_session():
self.evaluate(operator.assert_positive_definite()) # Should not fail
def test_real_spectrum_and_not_self_adjoint_hint_raises(self):
spectrum = [1., 2.]
with self.assertRaisesRegex(ValueError, "real.*always.*self-adjoint"):
linalg.LinearOperatorCirculant(spectrum, is_self_adjoint=False)
def test_real_spectrum_auto_sets_is_self_adjoint_to_true(self):
spectrum = [1., 2.]
operator = linalg.LinearOperatorCirculant(spectrum)
self.assertTrue(operator.is_self_adjoint)
@test_util.run_all_in_graph_and_eager_modes
| LinearOperatorCirculantTestNonHermitianSpectrum |
python | huggingface__transformers | tests/models/lilt/test_modeling_lilt.py | {
"start": 1141,
"end": 7529
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=24,
num_hidden_layers=2,
num_attention_heads=6,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
scope=None,
range_bbox=1000,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.scope = scope
self.range_bbox = range_bbox
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
t = bbox[i, j, 3]
bbox[i, j, 3] = bbox[i, j, 1]
bbox[i, j, 1] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
t = bbox[i, j, 2]
bbox[i, j, 2] = bbox[i, j, 0]
bbox[i, j, 0] = t
input_mask = None
if self.use_input_mask:
input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
config = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
)
def create_and_check_model(
self,
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
):
model = LiltModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
result = model(input_ids, bbox=bbox)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_token_classification(
self,
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
):
config.num_labels = self.num_labels
model = LiltForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_question_answering(
self,
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
):
model = LiltForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
bbox=bbox,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
| LiltModelTester |
python | huggingface__transformers | src/transformers/models/mamba2/modeling_mamba2.py | {
"start": 36766,
"end": 41910
} | class ____(Mamba2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.layers = nn.ModuleList([Mamba2Block(config, layer_idx=idx) for idx in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
self.norm_f = Mamba2RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon)
# Initialize weights and apply final processing
self._register_load_state_dict_pre_hook(self.load_hook)
self.post_init()
def load_hook(self, state_dict, prefix, *args):
for k in state_dict:
if "embedding." in k:
state_dict[k.replace("embedding.", "embeddings.")] = state_dict.pop(k)
break
def get_input_embeddings(self):
return self.embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.LongTensor] = None,
cache_params: Optional[Mamba2Cache] = None,
use_cache: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
**kwargs,
) -> Union[tuple, Mamba2Output]:
r"""
cache_params (`Mamba2Cache`, *optional*):
If passed along, the model uses the previous state in all the blocks (which will give the output for the
`input_ids` provided as if the model add `state_input_ids + input_ids` as context).
use_cache (`bool`, *optional*):
If set to `True`, the `cache_params` is returned and can be used to quickly generate the next logits.
cache_position (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
The position of the current input in the cache. This is used to ensure that the cache is correctly updated.
If `cache_params` is passed, `cache_position` should also be passed.
"""
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None): # ^ is python for xor
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embeddings(input_ids)
if self.gradient_checkpointing and self.training and use_cache:
use_cache = False
if use_cache:
if cache_params is None:
cache_params = Mamba2Cache(
self.config, inputs_embeds.size(0), device=inputs_embeds.device, dtype=inputs_embeds.dtype
)
cache_position = torch.arange(0, self.config.conv_kernel, device=inputs_embeds.device)
elif cache_position is None:
# cases when we do manual forward instead of using `model.generate` which will initiate
# `cache_position` and makes sure it is not None, throw error here instead of doing some
# hack to conjecture the current cache position
raise ValueError(
"You have to specify the `cache_position` manually when `use_cache=True` and `cache_params` is passed, "
"you don't have to pass a `cache_params` if you are in prefilling stage because in that case it will "
"be initialized for you automatically"
)
else:
cache_params = None
hidden_states = inputs_embeds
all_hidden_states = () if output_hidden_states else None
for mixer_block in self.layers:
hidden_states = mixer_block(
hidden_states,
cache_params=cache_params,
cache_position=cache_position,
attention_mask=attention_mask,
)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
hidden_states = self.norm_f(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, cache_params, all_hidden_states] if v is not None)
return Mamba2Output(
last_hidden_state=hidden_states,
cache_params=cache_params if use_cache else None,
hidden_states=all_hidden_states,
)
@auto_docstring(
custom_intro="""
The MAMBA2 Model transformer with a language modeling head on top (linear layer with weights not tied to the input
embeddings).
"""
)
| Mamba2Model |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.