language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | spyder-ide__spyder | spyder/utils/stylesheet.py | {
"start": 11722,
"end": 13004
} | class ____(SpyderStyleSheet):
"""Stylesheet for application toolbars."""
BUTTON_WIDTH = '47px'
BUTTON_HEIGHT = '47px'
BUTTON_MARGIN_LEFT = '3px'
BUTTON_MARGIN_RIGHT = '3px'
def set_stylesheet(self):
css = self.get_stylesheet()
# Main background color
css.QToolBar.setValues(
backgroundColor=SpyderPalette.COLOR_BACKGROUND_4
)
# Adjust QToolButton to follow the main toolbar style.
css.QToolButton.setValues(
width=self.BUTTON_WIDTH,
height=self.BUTTON_HEIGHT,
marginLeft=self.BUTTON_MARGIN_LEFT,
marginRight=self.BUTTON_MARGIN_RIGHT,
border='0px',
borderRadius='0px',
padding='0px',
)
for state in ['hover', 'pressed', 'checked', 'checked:hover']:
if state == 'hover':
color = SpyderPalette.COLOR_BACKGROUND_5
else:
color = SpyderPalette.COLOR_BACKGROUND_6
css[f'QToolBar QToolButton:{state}'].setValues(
backgroundColor=color
)
# Remove indicator for popup mode
css['QToolBar QToolButton::menu-indicator'].setValues(
image='none'
)
| ApplicationToolbarStylesheet |
python | apache__airflow | task-sdk/src/airflow/sdk/api/datamodels/_generated.py | {
"start": 17198,
"end": 18019
} | class ____(BaseModel):
"""
Asset event schema with fields that are needed for Runtime.
"""
id: Annotated[int, Field(title="Id")]
timestamp: Annotated[AwareDatetime, Field(title="Timestamp")]
extra: Annotated[dict[str, JsonValue] | None, Field(title="Extra")] = None
asset: AssetResponse
created_dagruns: Annotated[list[DagRunAssetReference], Field(title="Created Dagruns")]
source_task_id: Annotated[str | None, Field(title="Source Task Id")] = None
source_dag_id: Annotated[str | None, Field(title="Source Dag Id")] = None
source_run_id: Annotated[str | None, Field(title="Source Run Id")] = None
source_map_index: Annotated[int | None, Field(title="Source Map Index")] = None
partition_key: Annotated[str | None, Field(title="Partition Key")] = None
| AssetEventResponse |
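The row above uses pydantic v2's `Annotated[..., Field(...)]` idiom to attach schema metadata (titles, defaults) without changing the runtime type. A minimal sketch of that idiom, assuming pydantic v2; the `Event` model is illustrative, not part of the dataset:

```python
from typing import Annotated, Optional
from pydantic import BaseModel, Field

class Event(BaseModel):  # illustrative stand-in for AssetEventResponse
    id: Annotated[int, Field(title="Id")]
    extra: Annotated[Optional[dict], Field(title="Extra")] = None

# The Field metadata flows into the generated JSON schema.
assert Event(id=1).model_dump() == {"id": 1, "extra": None}
assert Event.model_json_schema()["properties"]["id"]["title"] == "Id"
```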
python | PyCQA__pylint | tests/functional/u/unnecessary/unnecessary_ellipsis.py | {
"start": 1836,
"end": 2933
} | class ____(List[int]):
@overload
def __getitem__(self, index: int) -> int: ...
@overload
def __getitem__(self, index: slice) -> List[int]: ...
def __getitem__(self, index: Union[int, slice]) -> Union[int, List[int]]:
if isinstance(index, int):
...
elif isinstance(index, slice):
...
else:
raise TypeError(...)
# Ellipsis is allowed as a default argument
def func_with_ellipsis_default_arg(a = ...) -> None:
"Some docstring."
# Ignore if the ellipsis is inside a container:
my_list = [...]
my_tuple = (...,)
my_set = {...}
# Ellipsis inside a container which is a value in a dictionary
mydict1 = {'x': [...]}
mydict2 = {'x': {...}}
mydict3 = {'x': (...,)}
# Ignore if the ellipsis is used with a lambda expression
print("x", lambda: ...)
def func1(val1, _):
if val1 is not ...:
pass
def func2(val1, val2):
"""Ignore if ellipsis is used on comparisons.
See https://github.com/pylint-dev/pylint/issues/6071."""
if val1 is not ... and val2:
pass
assert "x" != ...
| MyIntegerList |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/execution_api/versions/v2025_08_10.py | {
"start": 1912,
"end": 2315
} | class ____(VersionChange):
"""Add the `include_prior_dates` field to GetXComSliceFilterParams and GetXcomFilterParams."""
description = __doc__
instructions_to_migrate_to_previous_version = (
schema(GetXComSliceFilterParams).field("include_prior_dates").didnt_exist,
schema(GetXcomFilterParams).field("include_prior_dates").didnt_exist,
)
| AddIncludePriorDatesToGetXComSlice |
python | Textualize__textual | src/textual/css/_style_properties.py | {
"start": 17777,
"end": 18379
} | class ____:
"""Descriptor for getting and setting keyline information."""
def __get__(
self, obj: StylesBase, objtype: type[StylesBase] | None = None
) -> tuple[CanvasLineType, Color]:
return obj.get_rule("keyline", ("none", TRANSPARENT)) # type: ignore[return-value]
def __set__(self, obj: StylesBase, keyline: tuple[str, Color] | None):
if keyline is None:
if obj.clear_rule("keyline"):
obj.refresh(layout=True)
else:
if obj.set_rule("keyline", keyline):
obj.refresh(layout=True)
| KeylineProperty |
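The keyline descriptor above follows a generic get-with-default / clear-on-None pattern. A self-contained sketch of that pattern; `RuleProperty`, `Styles`, and the `_rules` dict are illustrative stand-ins for the Textual internals:

```python
class RuleProperty:
    """Get a rule with a default; setting None clears the rule (illustrative)."""

    def __init__(self, name, default):
        self.name = name
        self.default = default

    def __get__(self, obj, objtype=None):
        return obj._rules.get(self.name, self.default)

    def __set__(self, obj, value):
        if value is None:
            obj._rules.pop(self.name, None)   # clear, like clear_rule()
        else:
            obj._rules[self.name] = value     # set, like set_rule()

class Styles:
    keyline = RuleProperty("keyline", ("none", "transparent"))

    def __init__(self):
        self._rules = {}

s = Styles()
assert s.keyline == ("none", "transparent")
s.keyline = ("thin", "red")
assert s.keyline == ("thin", "red")
s.keyline = None                              # clearing restores the default
assert s.keyline == ("none", "transparent")
```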
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_common_errors.py | {
"start": 3155,
"end": 5083
} | class ____:
"""Test detection of incorrect indentation in docstrings."""
# Using function-based validation approach
def test_incorrect_parameter_indentation(self):
"""Test detection of incorrect parameter description indentation."""
docstring = '''"""Function with incorrect parameter indentation.
Args:
param1: Description not indented # Should be indented
param2: Another description not indented # Should be indented
Returns:
Correct indentation here
"""'''
result = validate_docstring_text(docstring, "test.function")
# Should detect indentation issues in RST processing
assert result.has_warnings() or result.has_errors()
def test_mixed_indentation_levels(self):
"""Test detection of mixed indentation levels."""
docstring = '''"""Function with mixed indentation levels.
Args:
param1: First parameter with correct indentation
param2: Second parameter with incorrect indentation
param3: Third parameter with too much indentation
Returns:
Description of return value
"""'''
result = validate_docstring_text(docstring, "test.function")
# Should detect indentation inconsistencies
assert result.has_warnings() or result.has_errors()
def test_section_content_not_indented(self):
"""Test detection of section content that's not properly indented."""
docstring = '''"""Function with section content not indented.
Args:
param1: This should be indented under Args
param2: This should also be indented
Returns:
The return description should be indented
"""'''
result = validate_docstring_text(docstring, "test.function")
# Should detect indentation issues
assert result.has_warnings() or result.has_errors()
| TestIndentationErrors |
python | django__django | django/template/base.py | {
"start": 39957,
"end": 40440
} | class ____(list):
# Set to True the first time a non-TextNode is inserted by
# extend_nodelist().
contains_nontext = False
def render(self, context):
return SafeString("".join([node.render_annotated(context) for node in self]))
def get_nodes_by_type(self, nodetype):
"Return a list of all nodes of the given type"
nodes = []
for node in self:
nodes.extend(node.get_nodes_by_type(nodetype))
return nodes
| NodeList |
python | langchain-ai__langchain | libs/core/langchain_core/tools/retriever.py | {
"start": 520,
"end": 3791
} | class ____(BaseModel):
"""Input to the retriever."""
query: str = Field(description="query to look up in retriever")
def _get_relevant_documents(
query: str,
retriever: BaseRetriever,
document_prompt: BasePromptTemplate,
document_separator: str,
callbacks: Callbacks = None,
response_format: Literal["content", "content_and_artifact"] = "content",
) -> str | tuple[str, list[Document]]:
docs = retriever.invoke(query, config={"callbacks": callbacks})
content = document_separator.join(
format_document(doc, document_prompt) for doc in docs
)
if response_format == "content_and_artifact":
return (content, docs)
return content
async def _aget_relevant_documents(
query: str,
retriever: BaseRetriever,
document_prompt: BasePromptTemplate,
document_separator: str,
callbacks: Callbacks = None,
response_format: Literal["content", "content_and_artifact"] = "content",
) -> str | tuple[str, list[Document]]:
docs = await retriever.ainvoke(query, config={"callbacks": callbacks})
content = document_separator.join(
[await aformat_document(doc, document_prompt) for doc in docs]
)
if response_format == "content_and_artifact":
return (content, docs)
return content
def create_retriever_tool(
retriever: BaseRetriever,
name: str,
description: str,
*,
document_prompt: BasePromptTemplate | None = None,
document_separator: str = "\n\n",
response_format: Literal["content", "content_and_artifact"] = "content",
) -> Tool:
r"""Create a tool to do retrieval of documents.
Args:
retriever: The retriever to use for the retrieval
name: The name for the tool. This will be passed to the language model,
so should be unique and somewhat descriptive.
description: The description for the tool. This will be passed to the language
model, so should be descriptive.
document_prompt: The prompt to use for the document.
document_separator: The separator to use between documents.
response_format: The tool response format.
If `"content"` then the output of the tool is interpreted as the contents of
a `ToolMessage`. If `"content_and_artifact"` then the output is expected to
be a two-tuple corresponding to the `(content, artifact)` of a `ToolMessage`
(artifact being a list of documents in this case).
Returns:
Tool class to pass to an agent.
"""
document_prompt = document_prompt or PromptTemplate.from_template("{page_content}")
func = partial(
_get_relevant_documents,
retriever=retriever,
document_prompt=document_prompt,
document_separator=document_separator,
response_format=response_format,
)
afunc = partial(
_aget_relevant_documents,
retriever=retriever,
document_prompt=document_prompt,
document_separator=document_separator,
response_format=response_format,
)
return Tool(
name=name,
description=description,
func=func,
coroutine=afunc,
args_schema=RetrieverInput,
response_format=response_format,
)
| RetrieverInput |
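Since `create_retriever_tool`'s docstring describes its arguments but not a call site, a hedged usage sketch may help; `TinyRetriever` and its documents are assumptions for illustration, not part of the source above:

```python
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_core.tools import create_retriever_tool

class TinyRetriever(BaseRetriever):
    """Illustrative in-memory retriever, not part of the source above."""
    docs: list

    def _get_relevant_documents(self, query, *, run_manager):
        # Naive substring match over the stored documents.
        return [d for d in self.docs if query.lower() in d.page_content.lower()]

retriever = TinyRetriever(docs=[Document(page_content="Retriever tools wrap a retriever.")])
tool = create_retriever_tool(
    retriever,
    name="search_notes",
    description="Search local notes for passages relevant to the query.",
)
print(tool.invoke({"query": "retriever"}))
```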
python | django__django | tests/composite_pk/test_update.py | {
"start": 208,
"end": 7663
} | class ____(TestCase):
maxDiff = None
@classmethod
def setUpTestData(cls):
cls.tenant_1 = Tenant.objects.create(name="A")
cls.tenant_2 = Tenant.objects.create(name="B")
cls.user_1 = User.objects.create(
tenant=cls.tenant_1,
id=1,
email="user0001@example.com",
)
cls.user_2 = User.objects.create(
tenant=cls.tenant_1,
id=2,
email="user0002@example.com",
)
cls.user_3 = User.objects.create(
tenant=cls.tenant_2,
id=3,
email="user0003@example.com",
)
cls.comment_1 = Comment.objects.create(id=1, user=cls.user_1)
cls.comment_2 = Comment.objects.create(id=2, user=cls.user_1)
cls.comment_3 = Comment.objects.create(id=3, user=cls.user_2)
cls.token_1 = Token.objects.create(id=1, tenant=cls.tenant_1)
cls.token_2 = Token.objects.create(id=2, tenant=cls.tenant_2)
cls.token_3 = Token.objects.create(id=3, tenant=cls.tenant_1)
cls.token_4 = Token.objects.create(id=4, tenant=cls.tenant_2)
def test_update_user(self):
email = "user9315@example.com"
result = User.objects.filter(pk=self.user_1.pk).update(email=email)
self.assertEqual(result, 1)
user = User.objects.get(pk=self.user_1.pk)
self.assertEqual(user.email, email)
def test_save_user(self):
count = User.objects.count()
email = "user9314@example.com"
user = User.objects.get(pk=self.user_1.pk)
user.email = email
with self.assertNumQueries(1) as ctx:
user.save()
sql = ctx[0]["sql"]
self.assertEqual(sql.count(connection.ops.quote_name("tenant_id")), 1)
self.assertEqual(sql.count(connection.ops.quote_name("id")), 1)
user.refresh_from_db()
self.assertEqual(user.email, email)
user = User.objects.get(pk=self.user_1.pk)
self.assertEqual(user.email, email)
self.assertEqual(count, User.objects.count())
def test_update_fields_deferred(self):
c = Comment.objects.defer("text", "user_id").get(pk=self.comment_1.pk)
c.text = "Hello"
with self.assertNumQueries(1) as ctx:
c.save()
sql = ctx[0]["sql"]
self.assertEqual(sql.count(connection.ops.quote_name("tenant_id")), 1)
self.assertEqual(sql.count(connection.ops.quote_name("comment_id")), 1)
c = Comment.objects.get(pk=self.comment_1.pk)
self.assertEqual(c.text, "Hello")
def test_update_fields_pk_field(self):
msg = (
"The following fields do not exist in this model, are m2m fields, "
"primary keys, or are non-concrete fields: id"
)
with self.assertRaisesMessage(ValueError, msg):
self.user_1.save(update_fields=["id"])
def test_bulk_update_comments(self):
comment_1 = Comment.objects.get(pk=self.comment_1.pk)
comment_2 = Comment.objects.get(pk=self.comment_2.pk)
comment_3 = Comment.objects.get(pk=self.comment_3.pk)
comment_1.text = "foo"
comment_2.text = "bar"
comment_3.text = "baz"
result = Comment.objects.bulk_update(
[comment_1, comment_2, comment_3], ["text"]
)
self.assertEqual(result, 3)
comment_1 = Comment.objects.get(pk=self.comment_1.pk)
comment_2 = Comment.objects.get(pk=self.comment_2.pk)
comment_3 = Comment.objects.get(pk=self.comment_3.pk)
self.assertEqual(comment_1.text, "foo")
self.assertEqual(comment_2.text, "bar")
self.assertEqual(comment_3.text, "baz")
def test_bulk_update_primary_key_fields(self):
message = "bulk_update() cannot be used with primary key fields."
with self.assertRaisesMessage(ValueError, message):
Comment.objects.bulk_update([self.comment_1, self.comment_2], ["id"])
def test_update_or_create_user(self):
test_cases = (
{
"pk": self.user_1.pk,
"defaults": {"email": "user3914@example.com"},
},
{
"pk": (self.tenant_1.id, self.user_1.id),
"defaults": {"email": "user9375@example.com"},
},
{
"tenant": self.tenant_1,
"id": self.user_1.id,
"defaults": {"email": "user3517@example.com"},
},
{
"tenant_id": self.tenant_1.id,
"id": self.user_1.id,
"defaults": {"email": "user8391@example.com"},
},
)
for fields in test_cases:
with self.subTest(fields=fields):
count = User.objects.count()
user, created = User.objects.update_or_create(**fields)
self.assertIs(created, False)
self.assertEqual(user.id, self.user_1.id)
self.assertEqual(user.pk, (self.tenant_1.id, self.user_1.id))
self.assertEqual(user.tenant_id, self.tenant_1.id)
self.assertEqual(user.email, fields["defaults"]["email"])
self.assertEqual(count, User.objects.count())
def test_update_or_create_with_pre_save_pk_field(self):
t = TimeStamped.objects.create(id=1)
self.assertEqual(TimeStamped.objects.count(), 1)
t, created = TimeStamped.objects.update_or_create(
pk=t.pk, defaults={"text": "new text"}
)
self.assertIs(created, False)
self.assertEqual(TimeStamped.objects.count(), 1)
self.assertEqual(t.text, "new text")
def test_update_comment_by_user_email(self):
result = Comment.objects.filter(user__email=self.user_1.email).update(
text="foo"
)
self.assertEqual(result, 2)
comment_1 = Comment.objects.get(pk=self.comment_1.pk)
comment_2 = Comment.objects.get(pk=self.comment_2.pk)
self.assertEqual(comment_1.text, "foo")
self.assertEqual(comment_2.text, "foo")
def test_update_token_by_tenant_name(self):
result = Token.objects.filter(tenant__name="A").update(secret="bar")
self.assertEqual(result, 2)
token_1 = Token.objects.get(pk=self.token_1.pk)
self.assertEqual(token_1.secret, "bar")
token_3 = Token.objects.get(pk=self.token_3.pk)
self.assertEqual(token_3.secret, "bar")
def test_cant_update_relation(self):
msg = (
"Cannot update model field <django.db.models.fields.related.ForeignObject: "
"user> (only concrete fields are permitted)"
)
with self.assertRaisesMessage(FieldError, msg):
Comment.objects.update(user=self.user_1)
with self.assertRaisesMessage(FieldError, msg):
Comment.objects.update(user=User())
def test_cant_update_pk_field(self):
qs = Comment.objects.filter(user__email=self.user_1.email)
msg = "Composite primary key fields must be updated individually."
with self.assertRaisesMessage(FieldError, msg):
qs.update(pk=(1, 10))
def test_update_value_not_composite(self):
msg = (
"Composite primary keys expressions are not allowed in this "
"query (text=F('pk'))."
)
with self.assertRaisesMessage(FieldError, msg):
Comment.objects.update(text=F("pk"))
| CompositePKUpdateTests |
python | getsentry__sentry | src/sentry/types/activity.py | {
"start": 24,
"end": 2378
} | class ____(Enum):
SET_RESOLVED = 1
SET_UNRESOLVED = 2
SET_IGNORED = 3
SET_PUBLIC = 4
SET_PRIVATE = 5
SET_REGRESSION = 6
CREATE_ISSUE = 7
NOTE = 8
FIRST_SEEN = 9
RELEASE = 10
ASSIGNED = 11
UNASSIGNED = 12
SET_RESOLVED_IN_RELEASE = 13
MERGE = 14
SET_RESOLVED_BY_AGE = 15
SET_RESOLVED_IN_COMMIT = 16
DEPLOY = 17
NEW_PROCESSING_ISSUES = 18
UNMERGE_SOURCE = 19
UNMERGE_DESTINATION = 20
SET_RESOLVED_IN_PULL_REQUEST = 21
# The user has reprocessed the group, so events may have moved to new groups
REPROCESS = 22
MARK_REVIEWED = 23
AUTO_SET_ONGOING = 24
SET_ESCALATING = 25
SET_PRIORITY = 26
DELETED_ATTACHMENT = 27
# Warning: This must remain in this EXACT order.
CHOICES = tuple(
(i.value, i.name.lower())
for i in [
ActivityType.SET_RESOLVED, # 1
ActivityType.SET_RESOLVED_BY_AGE, # 15
ActivityType.SET_RESOLVED_IN_RELEASE, # 13
ActivityType.SET_RESOLVED_IN_COMMIT, # 16
ActivityType.SET_RESOLVED_IN_PULL_REQUEST, # 21
ActivityType.SET_UNRESOLVED, # 2
ActivityType.SET_IGNORED, # 3
ActivityType.SET_PUBLIC, # 4
ActivityType.SET_PRIVATE, # 5
ActivityType.SET_REGRESSION, # 6
ActivityType.CREATE_ISSUE, # 7
ActivityType.NOTE, # 8
ActivityType.FIRST_SEEN, # 9
ActivityType.RELEASE, # 10
ActivityType.ASSIGNED, # 11
ActivityType.UNASSIGNED, # 12
ActivityType.MERGE, # 14
ActivityType.DEPLOY, # 17
ActivityType.NEW_PROCESSING_ISSUES, # 18
ActivityType.UNMERGE_SOURCE, # 19
ActivityType.UNMERGE_DESTINATION, # 20
ActivityType.REPROCESS, # 22
ActivityType.MARK_REVIEWED, # 23
ActivityType.AUTO_SET_ONGOING, # 24
ActivityType.SET_ESCALATING, # 25
ActivityType.SET_PRIORITY, # 26
ActivityType.DELETED_ATTACHMENT, # 27
]
)
STATUS_CHANGE_ACTIVITY_TYPES = (
ActivityType.SET_RESOLVED,
ActivityType.SET_UNRESOLVED,
ActivityType.SET_IGNORED,
ActivityType.SET_REGRESSION,
ActivityType.SET_RESOLVED_IN_RELEASE,
ActivityType.SET_RESOLVED_BY_AGE,
ActivityType.SET_RESOLVED_IN_COMMIT,
ActivityType.SET_RESOLVED_IN_PULL_REQUEST,
ActivityType.SET_ESCALATING,
)
| ActivityType |
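The module-level `CHOICES` construction above maps each enum member to a `(value, lowercased_name)` pair. A minimal sketch of the same pattern with a throwaway enum:

```python
from enum import Enum

class Color(Enum):  # illustrative stand-in for ActivityType
    RED = 1
    GREEN = 2

# (value, lowercased name) pairs, as in the CHOICES tuple above.
CHOICES = tuple((member.value, member.name.lower()) for member in Color)
assert CHOICES == ((1, "red"), (2, "green"))
```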
python | pytorch__pytorch | tools/linter/adapters/ruff_linter.py | {
"start": 360,
"end": 565
} | class ____(str, enum.Enum):
"""Severity of a lint message."""
ERROR = "error"
WARNING = "warning"
ADVICE = "advice"
DISABLED = "disabled"
@dataclasses.dataclass(frozen=True)
| LintSeverity |
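Mixing `str` into the `Enum`, as `LintSeverity` does above, makes members behave like their string values. A small illustrative check:

```python
import enum

class Severity(str, enum.Enum):  # illustrative stand-in for LintSeverity
    ERROR = "error"
    WARNING = "warning"

assert Severity.ERROR == "error"                 # compares equal to its value
assert Severity("warning") is Severity.WARNING   # lookup by value
assert "err" in Severity.ERROR                   # behaves like a plain string
```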
python | langchain-ai__langchain | libs/core/langchain_core/output_parsers/openai_tools.py | {
"start": 9969,
"end": 12815
} | class ____(JsonOutputToolsParser):
"""Parse tools from OpenAI response."""
tools: Annotated[list[TypeBaseModel], SkipValidation()]
"""The tools to parse."""
# TODO: Support more granular streaming of objects. Currently only streams once all
# Pydantic object fields are present.
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
"""Parse the result of an LLM call to a list of Pydantic objects.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON.
If `True`, the output will be a JSON object containing
all the keys that have been returned so far.
If `False`, the output will be the full JSON object.
Returns:
The parsed Pydantic objects.
Raises:
ValueError: If the tool call arguments are not a dict.
ValidationError: If the tool call arguments do not conform
to the Pydantic model.
"""
json_results = super().parse_result(result, partial=partial)
if not json_results:
return None if self.first_tool_only else []
json_results = [json_results] if self.first_tool_only else json_results
name_dict_v2: dict[str, TypeBaseModel] = {
tool.model_config.get("title") or tool.__name__: tool
for tool in self.tools
if is_pydantic_v2_subclass(tool)
}
name_dict_v1: dict[str, TypeBaseModel] = {
tool.__name__: tool for tool in self.tools if is_pydantic_v1_subclass(tool)
}
name_dict: dict[str, TypeBaseModel] = {**name_dict_v2, **name_dict_v1}
pydantic_objects = []
for res in json_results:
if not isinstance(res["args"], dict):
if partial:
continue
msg = (
f"Tool arguments must be specified as a dict, received: "
f"{res['args']}"
)
raise ValueError(msg)
try:
pydantic_objects.append(name_dict[res["type"]](**res["args"]))
except (ValidationError, ValueError):
if partial:
continue
has_max_tokens_stop_reason = any(
generation.message.response_metadata.get("stop_reason")
== "max_tokens"
for generation in result
if isinstance(generation, ChatGeneration)
)
if has_max_tokens_stop_reason:
logger.exception(_MAX_TOKENS_ERROR)
raise
if self.first_tool_only:
return pydantic_objects[0] if pydantic_objects else None
return pydantic_objects
| PydanticToolsParser |
python | scipy__scipy | scipy/interpolate/tests/test_bsplines.py | {
"start": 84371,
"end": 86375
} | class ____:
def __init__(self, t, c, k=3):
"""Tensor product spline object.
c[i1, i2, ..., id] * B(x1, i1) * B(x2, i2) * ... * B(xd, id)
Parameters
----------
c : ndarray, shape (n1, n2, ..., nd, ...)
b-spline coefficients
t : tuple of 1D ndarrays
knot vectors in directions 1, 2, ... d
``len(t[i]) == n[i] + k + 1``
k : int or length-d tuple of integers
spline degrees.
"""
ndim = len(t)
assert ndim <= len(c.shape)
try:
len(k)
except TypeError:
# make k a tuple
k = (k,)*ndim
self.k = tuple(operator.index(ki) for ki in k)
self.t = tuple(np.asarray(ti, dtype=float) for ti in t)
self.c = c
def __call__(self, x):
ndim = len(self.t)
# a single evaluation point: `x` is a 1D array_like, shape (ndim,)
assert len(x) == ndim
# get the indices in an ndim-dimensional vector
i = ['none', ]*ndim
for d in range(ndim):
td, xd = self.t[d], x[d]
k = self.k[d]
# find the index for x[d]
if xd == td[k]:
i[d] = k
else:
i[d] = np.searchsorted(td, xd) - 1
assert td[i[d]] <= xd <= td[i[d]+1]
assert i[d] >= k and i[d] < len(td) - k
i = tuple(i)
# iterate over the dimensions, form linear combinations of
# products B(x_1) * B(x_2) * ... B(x_N) of (k+1)**N b-splines
# which are non-zero at `i = (i_1, i_2, ..., i_N)`.
result = 0
iters = [range(i[d] - self.k[d], i[d] + 1) for d in range(ndim)]
for idx in itertools.product(*iters):
term = self.c[idx] * np.prod([B(x[d], self.k[d], idx[d], self.t[d])
for d in range(ndim)])
result += term
return np.asarray(result)
@make_xp_test_case(NdBSpline)
| NdBSpline0 |
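The class above evaluates a tensor-product spline as sums of products of 1-D B-spline basis functions. For a 2-D case this reduces to a bilinear form over design-matrix rows; a sketch assuming SciPy >= 1.8 (`BSpline.design_matrix`), with invented knots and coefficients:

```python
import numpy as np
from scipy.interpolate import BSpline

k = 3
t = np.r_[[0.0] * (k + 1), [1.0] * (k + 1)]   # one segment -> 4 basis functions
C = np.arange(16.0).reshape(4, 4)             # coefficients c[i, j]

x, y = 0.3, 0.7
bx = BSpline.design_matrix(np.array([x]), t, k).toarray()[0]
by = BSpline.design_matrix(np.array([y]), t, k).toarray()[0]

# f(x, y) = sum_ij c[i, j] * B_i(x) * B_j(y)
value = bx @ C @ by
print(value)
```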
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_date_1904_02.py | {
"start": 342,
"end": 1348
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("date_1904_02.xlsx")
def test_create_file(self):
"""Test the creation of a XlsxWriter file with date times in 1900 and1904 epochs."""
workbook = Workbook(self.got_filename, {"date_1904": True})
worksheet = workbook.add_worksheet()
format1 = workbook.add_format({"num_format": 14})
worksheet.set_column("A:A", 12)
worksheet.write_datetime("A1", date(1904, 1, 1), format1)
worksheet.write_datetime("A2", date(1906, 9, 27), format1)
worksheet.write_datetime("A3", date(1917, 9, 9), format1)
worksheet.write_datetime("A4", date(1931, 5, 19), format1)
worksheet.write_datetime("A5", date(2177, 10, 15), format1)
worksheet.write_datetime("A6", date(4641, 11, 27), format1)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_bedrock.py | {
"start": 27217,
"end": 29197
} | class ____:
JOB_NAME = "job_name"
ROLE_ARN = "role_arn"
MODEL_ID = "model_id"
INPUT_URI = "input_uri"
OUTPUT_URI = "output_uri"
INVOKE_KWARGS = {"tags": {"key": "key", "value": "value"}}
JOB_ARN = "job_arn"
@pytest.fixture
def mock_conn(self) -> Generator[BaseAwsConnection, None, None]:
with mock.patch.object(BedrockHook, "conn") as _conn:
_conn.create_model_invocation_job.return_value = {"jobArn": self.JOB_ARN}
yield _conn
@pytest.fixture
def bedrock_hook(self) -> Generator[BedrockHook, None, None]:
with mock_aws():
hook = BedrockHook(aws_conn_id="aws_default")
yield hook
def setup_method(self):
self.operator = BedrockBatchInferenceOperator(
task_id="test_task",
job_name=self.JOB_NAME,
role_arn=self.ROLE_ARN,
model_id=self.MODEL_ID,
input_uri=self.INPUT_URI,
output_uri=self.OUTPUT_URI,
invoke_kwargs=self.INVOKE_KWARGS,
)
self.operator.defer = mock.MagicMock()
@pytest.mark.parametrize(
("wait_for_completion", "deferrable"),
[
pytest.param(False, False, id="no_wait"),
pytest.param(True, False, id="wait"),
pytest.param(False, True, id="defer"),
],
)
@mock.patch.object(BedrockHook, "get_waiter")
def test_customize_model_wait_combinations(
self, _, wait_for_completion, deferrable, mock_conn, bedrock_hook
):
self.operator.wait_for_completion = wait_for_completion
self.operator.deferrable = deferrable
response = self.operator.execute({})
assert response == self.JOB_ARN
assert bedrock_hook.get_waiter.call_count == wait_for_completion
assert self.operator.defer.call_count == deferrable
def test_template_fields(self):
validate_template_fields(self.operator)
| TestBedrockBatchInferenceOperator |
python | tensorflow__tensorflow | tensorflow/python/tpu/tests/tpu_embedding_v2_correctness_hd_ragged_forward_test.py | {
"start": 954,
"end": 1441
} | class ____(
tpu_embedding_v2_correctness_base_test.TPUEmbeddingCorrectnessBaseTest):
@parameterized.parameters(
['sgd', 'adagrad', 'adam', 'ftrl', 'adagrad_momentum'])
def test_embedding(self, optimizer_name):
if optimizer_name != 'sgd':
self.skip_if_oss()
self._test_embedding(
optimizer_name, training=False, sparse=False, is_high_dimensional=True)
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
| TPUEmbeddingCorrectnessTest |
python | ray-project__ray | rllib/core/learner/torch/torch_differentiable_learner.py | {
"start": 930,
"end": 16838
} | class ____(DifferentiableLearner):
"""A `DifferentiableLearner` class leveraging PyTorch for functional updates.
This class utilizes PyTorch 2.0's `func` module to perform functional
updates on the provided parameters.
"""
# Set the framework to `"torch"`.
framework: str = "torch"
def __init__(self, **kwargs):
# First initialize the `DifferentiableLearner` base class to set
# the configurations and `MultiRLModule`.
super().__init__(**kwargs)
# Whether to compile the RL Module of this learner. This implies that the
# forward_train method of the RL Module will be compiled. Furthermore,
# other forward methods of the RL Module will be compiled on demand.
# This is assumed not to happen, since other forward methods are not expected
# to be used during training.
self._torch_compile_forward_train = False
self._torch_compile_cfg = None
# Whether to compile the `_uncompiled_update` method of this learner. This
# implies that everything within `_uncompiled_update` will be compiled,
# not only the forward_train method of the RL Module.
# Note that this is experimental.
# Note that this requires recompiling the forward methods once we add/remove
# RL Modules.
self._torch_compile_complete_update = False
if self.config.torch_compile_learner:
if (
self.config.torch_compile_learner_what_to_compile
== TorchCompileWhatToCompile.COMPLETE_UPDATE
):
self._torch_compile_complete_update = True
self._compiled_update_initialized = False
else:
self._torch_compile_forward_train = True
self._torch_compile_cfg = TorchCompileConfig(
torch_dynamo_backend=self.config.torch_compile_learner_dynamo_backend,
torch_dynamo_mode=self.config.torch_compile_learner_dynamo_mode,
)
# TODO (simon): See if we can include these without a torch optimizer.
# self._lr_schedulers = {}
# self._lr_scheduler_classes = None
# if self.config._torch_lr_scheduler_classes:
# self._lr_scheduler_classes = self.config._torch_lr_scheduler_classes
def _uncompiled_update(
self,
batch: Dict,
params: Dict[ModuleID, NamedParamDict],
**kwargs,
) -> Tuple[Any, Any, Dict[ModuleID, NamedParamDict], Any]:
"""Performs a single functional update using a batch of data.
This update operates on parameters passed via a functional call to the
`MultiRLModule` and leverages PyTorch 2.0's `autograd` module. Parameters
are not modified in-place within `self._module`; instead, updates are
applied to the cloned parameters provided.
Args:
batch: A dictionary (or `MultiAgentBatch`) containing training data for
all modules in the `MultiRLModule` (that should be trained).
params: A dictionary of named parameters for each module id.
Returns:
A tuple consisting of:
1) the output of a functional forward call to the RLModule using
`params`,
2) the `loss_per_module` dictionary mapping module IDs to individual
loss tensors,
3) the functionally updated parameters in the (dict) format passed in,
4) a metrics dict mapping module IDs to metrics key/value pairs.
"""
# TODO (sven): Causes weird cuda error when WandB is used.
# Diagnosis thus far:
# - All peek values during metrics.reduce are non-tensors.
# - However, in impala.py::training_step(), a tensor does arrive after learner
# group.update(), so somehow, there is still a race condition
# possible (learner, which performs the reduce() and learner thread, which
# performs the logging of tensors into metrics logger).
self._compute_off_policyness(batch)
# Make a functional forward pass with the provided parameters.
fwd_out = self._make_functional_call(params, batch)
loss_per_module = self.compute_losses(fwd_out=fwd_out, batch=batch)
# Compute gradients for the provided parameters.
gradients = self.compute_gradients(loss_per_module, params)
with contextlib.ExitStack() as stack:
if self.config.num_learners > 1:
for mod in self.module.values():
# Skip non-torch modules, b/c they may not have the `no_sync` API.
if isinstance(mod, torch.nn.Module):
stack.enter_context(mod.no_sync())
# TODO (simon): See if we need postprocessing of gradients here.
# postprocessed_gradients = self.postprocess_gradients(gradients)
# Make a stateless (of `params`) update of the `RLModule` parameters.
params = self.apply_gradients(gradients, params)
# Deactivate tensor-mode on our MetricsLogger and collect the (tensor)
# results.
return fwd_out, loss_per_module, params, {}
# TODO (simon): Maybe make type for gradients.
@override(DifferentiableLearner)
def compute_gradients(
self,
loss_per_module: Dict[ModuleID, TensorType],
params: Dict[ModuleID, NamedParamDict],
**kwargs,
) -> Dict[ModuleID, NamedParamDict]:
"""Computes functionally gradients based on the given losses.
This method uses `torch.autograd.grad` to make the backward pass on the
`MultiRLModule` which enables a functional backward pass. If a PyTorch
optimizer is needed a differentiable one must be used (e.g. `torchopt`).
Args:
loss_per_module: Dict mapping module IDs to their individual total loss
terms, computed by the individual `compute_loss_for_module()` calls.
The overall total loss (sum of loss terms over all modules) is stored
under `loss_per_module[ALL_MODULES]`
params: A dictionary containing named parameters for each module id.
**kwargs: Forward compatibility kwargs.
Returns:
The (named) gradients in the same (dict) format as `params`.
"""
# TODO (simon): Add grad scalers later.
total_loss = sum(loss_per_module.values())
# Use `torch`'s `autograd` to compute gradients and create a graph, so we can
# compute higher-order gradients. Allow specified inputs to be unused in the
# outputs, as not all modules/parameters of a `MultiRLModule` are necessarily
# used in the loss.
# Note, parameters are named parameters as this is needed by the
# `torch.func.functional_call`
# TODO (simon): Make sure this works for `MultiRLModule`s. The call below passes
# all parameter tensors in a single list, but the `functional_call` above needs
# named parameters for each module. Implement this via `foreach_module`.
grads = torch.autograd.grad(
total_loss,
sum((list(param.values()) for mid, param in params.items()), []),
create_graph=True,
retain_graph=True,
allow_unused=True,
)
# Map all gradients to their keys.
named_grads = {
module_id: {
name: grad for (name, _), grad in zip(module_params.items(), grads)
}
for module_id, module_params in params.items()
}
return named_grads
@override(DifferentiableLearner)
def apply_gradients(
self,
gradients: Dict[ModuleID, NamedParamDict],
params: Dict[ModuleID, NamedParamDict],
) -> Dict[ModuleID, NamedParamDict]:
"""Applies the given gradients in a functional manner.
This method requires functional parameter updates, meaning modifications
must not be performed in-place (e.g., using an optimizer or directly within
the `MultiRLModule`).
Args:
gradients: A dictionary containing named gradients for each module id.
params: A dictionary containing named parameters for each module id.
Returns:
The updated parameters in the same (dict) format as `params`.
"""
policies_to_update = self.learner_config.policies_to_update or list(
gradients.keys()
)
# Note, because this is a functional update we cannot apply in-place
# modifications of parameters.
updated_params = {}
for module_id, module_grads in gradients.items():
if module_id not in policies_to_update:
updated_params[module_id] = params[module_id]
continue
updated_params[module_id] = {}
for name, grad in module_grads.items():
# If updates should not be skipped, turn `nan` and `inf` gradients to zero.
if (
not self.config.torch_skip_nan_gradients
and not torch.isfinite(grad).all()
):
# Warn the user about `nan` gradients.
logger.warning(f"Gradients {name} contain `nan/inf` values.")
# If updates should be skipped, do not step the optimizer and return.
if not self.config.torch_skip_nan_gradients:
logger.warning(
"Setting `nan/inf` gradients to zero. If updates with "
"`nan/inf` gradients should not be set to zero and instead "
"the update be skipped entirely set `torch_skip_nan_gradients` "
"to `True`."
)
# If necessary turn `nan` gradients to zero. Note, this can corrupt the
# internal state of the optimizer, if many `nan` gradients occur.
grad = torch.nan_to_num(grad)
if self.config.torch_skip_nan_gradients or torch.isfinite(grad).all():
# Update each parameter, by a simple gradient descent step.
updated_params[module_id][name] = (
params[module_id][name] - self.learner_config.lr * grad
)
elif grad is None or not torch.isfinite(grad).all():
logger.warning(
"Skipping this update. If updates with `nan/inf` gradients "
"should not be skipped entirely and instead `nan/inf` "
"gradients set to `zero` set `torch_skip_nan_gradients` to "
"`False`."
)
return updated_params
def _make_functional_call(
self, params: Dict[ModuleID, NamedParamDict], batch: MultiAgentBatch
) -> Dict[ModuleID, NamedParamDict]:
"""Makes a functional call for each module in the `MultiRLModule`."""
return self._module.foreach_module(
lambda mid, m: torch.func.functional_call(m, params[mid], batch[mid]),
return_dict=True,
)
@override(DifferentiableLearner)
def _get_tensor_variable(
self, value, dtype=None, trainable=False
) -> "torch.Tensor":
tensor = torch.tensor(
value,
requires_grad=trainable,
# TODO (simon): Make GPU-trainable.
# device=self._device,
dtype=(
dtype
or (
torch.float32
if isinstance(value, float)
else torch.int32
if isinstance(value, int)
else None
)
),
)
return nn.Parameter(tensor) if trainable else tensor
def _convert_batch_type(
self,
batch: MultiAgentBatch,
to_device: bool = True,
pin_memory: bool = False,
use_stream: bool = False,
) -> MultiAgentBatch:
batch = convert_to_torch_tensor(
batch.policy_batches,
device=self._device if to_device else None,
pin_memory=pin_memory,
use_stream=use_stream,
)
# TODO (sven): This computation of `env_steps` is not accurate!
length = max(len(b) for b in batch.values())
batch = MultiAgentBatch(batch, env_steps=length)
return batch
def _compute_off_policyness(self, batch):
# Log off-policy'ness of this batch wrt the current weights.
off_policyness = {
(mid, DIFF_NUM_GRAD_UPDATES_VS_SAMPLER_POLICY): (
(self._weights_seq_no - module_batch[WEIGHTS_SEQ_NO]).float()
)
for mid, module_batch in batch.items()
if WEIGHTS_SEQ_NO in module_batch
}
for key in off_policyness.keys():
mid = key[0]
if Columns.LOSS_MASK not in batch[mid]:
off_policyness[key] = torch.mean(off_policyness[key])
else:
mask = batch[mid][Columns.LOSS_MASK]
num_valid = torch.sum(mask)
off_policyness[key] = torch.sum(off_policyness[key][mask]) / num_valid
self.metrics.log_dict(off_policyness, window=1)
@override(DifferentiableLearner)
def build(self, device: Optional[DeviceType] = None) -> None:
"""Builds the TorchDifferentiableLearner.
This method is specific to TorchDifferentiableLearner. Before running super() it will
initialize the device properly based on `self.config`, so that `_make_module()`
can place the created module on the correct device. After running super() it
wraps the module in a TorchDDPRLModule if `config.num_learners > 0`.
Note, in inherited classes it is advisable to call the parent's `build()`
after setting up all variables because `configure_optimizer_for_module` is
called in this `Learner.build()`.
"""
# TODO (simon): Allow different `DifferentiableLearner` instances in a
# `MetaLearner` to run on different GPUs.
super().build(device=device)
if self._torch_compile_complete_update:
torch._dynamo.reset()
self._compiled_update_initialized = False
self._possibly_compiled_update = torch.compile(
self._uncompiled_update,
backend=self._torch_compile_cfg.torch_dynamo_backend,
mode=self._torch_compile_cfg.torch_dynamo_mode,
**self._torch_compile_cfg.kwargs,
)
# Otherwise, we use the possibly compiled `forward_train` in
# the module, compiled in the `MetaLearner`.
else:
# Nothing, to do.
self._possibly_compiled_update = self._uncompiled_update
@override(DifferentiableLearner)
def _update(
self, batch: Dict[str, Any], params: Dict[ModuleID, NamedParamDict]
) -> Tuple[Any, Dict[ModuleID, NamedParamDict], Any, Any]:
# The first time we call _update after building the learner or
# adding/removing models, we update with the uncompiled update method.
# This makes it so that any variables that may be created during the first
# update step are already there when compiling. More specifically,
# this avoids errors that occur around using defaultdicts with
# torch.compile().
if (
self._torch_compile_complete_update
and not self._compiled_update_initialized
):
self._compiled_update_initialized = True
return self._uncompiled_update(batch, params)
else:
return self._possibly_compiled_update(batch, params)
| TorchDifferentiableLearner |
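The learner above combines `torch.func.functional_call` (a stateless forward pass with externally supplied parameters) with `torch.autograd.grad` and an out-of-place descent step. A minimal sketch of that loop; the model, data, and `lr` are assumptions:

```python
import torch
import torch.nn as nn

model = nn.Linear(3, 1)
# Clone the named parameters; the update below never touches `model` in place.
params = {n: p.detach().clone().requires_grad_(True)
          for n, p in model.named_parameters()}

x = torch.randn(8, 3)
y = torch.randn(8, 1)

# Stateless forward pass with the externally supplied parameters.
out = torch.func.functional_call(model, params, (x,))
loss = ((out - y) ** 2).mean()

# Functional backward pass; create_graph=True keeps higher-order grads possible.
grads = torch.autograd.grad(loss, list(params.values()), create_graph=True)
named_grads = dict(zip(params.keys(), grads))

lr = 0.1  # assumed learning rate
updated = {n: params[n] - lr * named_grads[n] for n in params}  # out-of-place step
```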
python | boto__boto3 | tests/unit/dynamodb/test_transform.py | {
"start": 2541,
"end": 12328
} | class ____(BaseTransformationTest):
def setUp(self):
super().setUp()
self.transformation = lambda params: self.transformed_value
self.add_shape({self.target_shape: {'type': 'string'}})
def test_transform_structure(self):
input_params = {
'Structure': {
'TransformMe': self.original_value,
'LeaveAlone': self.original_value,
}
}
input_shape = {
'Structure': {
'type': 'structure',
'members': {
'TransformMe': {'shape': self.target_shape},
'LeaveAlone': {'shape': 'String'},
},
}
}
self.add_input_shape(input_shape)
self.transformer.transform(
params=input_params,
model=self.operation_model.input_shape,
transformation=self.transformation,
target_shape=self.target_shape,
)
assert input_params == {
'Structure': {
'TransformMe': self.transformed_value,
'LeaveAlone': self.original_value,
}
}
def test_transform_map(self):
input_params = {
'TransformMe': {'foo': self.original_value},
'LeaveAlone': {'foo': self.original_value},
}
targeted_input_shape = {
'TransformMe': {
'type': 'map',
'key': {'shape': 'String'},
'value': {'shape': self.target_shape},
}
}
untargeted_input_shape = {
'LeaveAlone': {
'type': 'map',
'key': {'shape': 'String'},
'value': {'shape': 'String'},
}
}
self.add_input_shape(targeted_input_shape)
self.add_input_shape(untargeted_input_shape)
self.transformer.transform(
params=input_params,
model=self.operation_model.input_shape,
transformation=self.transformation,
target_shape=self.target_shape,
)
assert input_params == {
'TransformMe': {'foo': self.transformed_value},
'LeaveAlone': {'foo': self.original_value},
}
def test_transform_list(self):
input_params = {
'TransformMe': [self.original_value, self.original_value],
'LeaveAlone': [self.original_value, self.original_value],
}
targeted_input_shape = {
'TransformMe': {
'type': 'list',
'member': {'shape': self.target_shape},
}
}
untargeted_input_shape = {
'LeaveAlone': {'type': 'list', 'member': {'shape': 'String'}}
}
self.add_input_shape(targeted_input_shape)
self.add_input_shape(untargeted_input_shape)
self.transformer.transform(
params=input_params,
model=self.operation_model.input_shape,
transformation=self.transformation,
target_shape=self.target_shape,
)
assert input_params == {
'TransformMe': [self.transformed_value, self.transformed_value],
'LeaveAlone': [self.original_value, self.original_value],
}
def test_transform_nested_structure(self):
input_params = {
'WrapperStructure': {
'Structure': {
'TransformMe': self.original_value,
'LeaveAlone': self.original_value,
}
}
}
structure_shape = {
'Structure': {
'type': 'structure',
'members': {
'TransformMe': {'shape': self.target_shape},
'LeaveAlone': {'shape': 'String'},
},
}
}
input_shape = {
'WrapperStructure': {
'type': 'structure',
'members': {'Structure': {'shape': 'Structure'}},
}
}
self.add_shape(structure_shape)
self.add_input_shape(input_shape)
self.transformer.transform(
params=input_params,
model=self.operation_model.input_shape,
transformation=self.transformation,
target_shape=self.target_shape,
)
assert input_params == {
'WrapperStructure': {
'Structure': {
'TransformMe': self.transformed_value,
'LeaveAlone': self.original_value,
}
}
}
def test_transform_nested_map(self):
input_params = {
'TargetedWrapperMap': {'foo': {'bar': self.original_value}},
'UntargetedWrapperMap': {'foo': {'bar': self.original_value}},
}
targeted_map_shape = {
'TransformMeMap': {
'type': 'map',
'key': {'shape': 'String'},
'value': {'shape': self.target_shape},
}
}
targeted_wrapper_shape = {
'TargetedWrapperMap': {
'type': 'map',
'key': {'shape': 'Name'},
'value': {'shape': 'TransformMeMap'},
}
}
self.add_shape(targeted_map_shape)
self.add_input_shape(targeted_wrapper_shape)
untargeted_map_shape = {
'LeaveAloneMap': {
'type': 'map',
'key': {'shape': 'String'},
'value': {'shape': 'String'},
}
}
untargeted_wrapper_shape = {
'UntargetedWrapperMap': {
'type': 'map',
'key': {'shape': 'Name'},
'value': {'shape': 'LeaveAloneMap'},
}
}
self.add_shape(untargeted_map_shape)
self.add_input_shape(untargeted_wrapper_shape)
self.transformer.transform(
params=input_params,
model=self.operation_model.input_shape,
transformation=self.transformation,
target_shape=self.target_shape,
)
assert input_params == {
'TargetedWrapperMap': {'foo': {'bar': self.transformed_value}},
'UntargetedWrapperMap': {'foo': {'bar': self.original_value}},
}
def test_transform_nested_list(self):
input_params = {
'TargetedWrapperList': [
[self.original_value, self.original_value]
],
'UntargetedWrapperList': [
[self.original_value, self.original_value]
],
}
targeted_list_shape = {
'TransformMe': {
'type': 'list',
'member': {'shape': self.target_shape},
}
}
targeted_wrapper_shape = {
'TargetedWrapperList': {
'type': 'list',
'member': {'shape': 'TransformMe'},
}
}
self.add_shape(targeted_list_shape)
self.add_input_shape(targeted_wrapper_shape)
untargeted_list_shape = {
'LeaveAlone': {'type': 'list', 'member': {'shape': 'String'}}
}
untargeted_wrapper_shape = {
'UntargetedWrapperList': {
'type': 'list',
'member': {'shape': 'LeaveAlone'},
}
}
self.add_shape(untargeted_list_shape)
self.add_input_shape(untargeted_wrapper_shape)
self.transformer.transform(
params=input_params,
model=self.operation_model.input_shape,
transformation=self.transformation,
target_shape=self.target_shape,
)
assert input_params == {
'TargetedWrapperList': [
[self.transformed_value, self.transformed_value]
],
'UntargetedWrapperList': [
[self.original_value, self.original_value]
],
}
def test_transform_incorrect_type_for_structure(self):
input_params = {'Structure': 'foo'}
input_shape = {
'Structure': {
'type': 'structure',
'members': {
'TransformMe': {'shape': self.target_shape},
},
}
}
self.add_input_shape(input_shape)
self.transformer.transform(
params=input_params,
model=self.operation_model.input_shape,
transformation=self.transformation,
target_shape=self.target_shape,
)
assert input_params == {'Structure': 'foo'}
def test_transform_incorrect_type_for_map(self):
input_params = {'Map': 'foo'}
input_shape = {
'Map': {
'type': 'map',
'key': {'shape': 'String'},
'value': {'shape': self.target_shape},
}
}
self.add_input_shape(input_shape)
self.transformer.transform(
params=input_params,
model=self.operation_model.input_shape,
transformation=self.transformation,
target_shape=self.target_shape,
)
assert input_params == {'Map': 'foo'}
def test_transform_incorrect_type_for_list(self):
input_params = {'List': 'foo'}
input_shape = {
'List': {'type': 'list', 'member': {'shape': self.target_shape}}
}
self.add_input_shape(input_shape)
self.transformer.transform(
params=input_params,
model=self.operation_model.input_shape,
transformation=self.transformation,
target_shape=self.target_shape,
)
assert input_params == {'List': 'foo'}
| TestInputOutputTransformer |
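The tests above exercise a transformer that walks botocore shapes and rewrites only values whose shape matches the target, leaving wrongly typed params untouched. A minimal sketch of that walk with simplified dict shapes (not botocore models):

```python
def transform(value, shape, fn):
    """Apply fn to every value whose (simplified) shape is the target."""
    kind = shape["type"]
    if kind == "target":
        return fn(value)
    if kind == "structure" and isinstance(value, dict):
        return {k: transform(v, shape["members"][k], fn)
                if k in shape["members"] else v
                for k, v in value.items()}
    if kind == "map" and isinstance(value, dict):
        return {k: transform(v, shape["value"], fn) for k, v in value.items()}
    if kind == "list" and isinstance(value, list):
        return [transform(v, shape["member"], fn) for v in value]
    # Wrong type for the shape: leave untouched, as the tests above assert.
    return value

shape = {"type": "structure", "members": {"x": {"type": "target"}}}
assert transform({"x": 1, "y": 2}, shape, lambda v: v * 10) == {"x": 10, "y": 2}
assert transform("foo", shape, lambda v: v * 10) == "foo"  # incorrect-type case
```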
python | pytorch__pytorch | torch/nn/modules/activation.py | {
"start": 25933,
"end": 27439
} | class ____(Module):
r"""Applies the Softplus function element-wise.
.. math::
\text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x))
SoftPlus is a smooth approximation to the ReLU function and can be used
to constrain the output of a machine to always be positive.
For numerical stability the implementation reverts to the linear function
when :math:`input \times \beta > threshold`.
Args:
beta: the :math:`\beta` value for the Softplus formulation. Default: 1
threshold: values above this revert to a linear function. Default: 20
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Softplus.png
Examples::
>>> m = nn.Softplus()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["beta", "threshold"]
beta: float
threshold: float
def __init__(self, beta: float = 1.0, threshold: float = 20.0) -> None:
super().__init__()
self.beta = beta
self.threshold = threshold
def forward(self, input: Tensor) -> Tensor:
"""
Run forward pass.
"""
return F.softplus(input, self.beta, self.threshold)
def extra_repr(self) -> str:
"""
Return the extra representation of the module.
"""
return f"beta={self.beta}, threshold={self.threshold}"
| Softplus |
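A quick numeric check of the formula and the linear-regime cutoff described in the docstring above:

```python
import torch
import torch.nn as nn

m = nn.Softplus(beta=1.0, threshold=20.0)
x = torch.tensor([-2.0, 0.0, 2.0, 50.0])
out = m(x)

# Manual formula: (1/beta) * log(1 + exp(beta * x)), here with beta = 1.
manual = torch.log1p(torch.exp(x[:3]))
assert torch.allclose(out[:3], manual)

# For beta * x > threshold the implementation reverts to the identity.
assert out[3] == x[3]
```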
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/language/visitor_meta.py | {
"start": 1912,
"end": 2980
} | class ____(type):
def __new__(cls, name, bases, attrs):
enter_handlers = {}
leave_handlers = {}
for base in bases:
if hasattr(base, '_enter_handlers'):
enter_handlers.update(base._enter_handlers)
if hasattr(base, '_leave_handlers'):
leave_handlers.update(base._leave_handlers)
for attr, val in attrs.items():
if attr.startswith('enter_'):
ast_kind = attr[6:]
ast_type = AST_KIND_TO_TYPE.get(ast_kind)
enter_handlers[ast_type] = val
elif attr.startswith('leave_'):
ast_kind = attr[6:]
ast_type = AST_KIND_TO_TYPE.get(ast_kind)
leave_handlers[ast_type] = val
attrs['_enter_handlers'] = enter_handlers
attrs['_leave_handlers'] = leave_handlers
attrs['_get_enter_handler'] = enter_handlers.get
attrs['_get_leave_handler'] = leave_handlers.get
return super(VisitorMeta, cls).__new__(cls, name, bases, attrs)
| VisitorMeta |
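The metaclass above registers `enter_*`/`leave_*` methods into handler dicts at class-creation time, inheriting handlers from bases. A reduced sketch of that registration (enter handlers only; `HandlerMeta` is illustrative):

```python
class HandlerMeta(type):
    """Reduced, illustrative version of the registration above (enter only)."""

    def __new__(cls, name, bases, attrs):
        handlers = {}
        for base in bases:
            handlers.update(getattr(base, "_handlers", {}))
        for attr, val in attrs.items():
            if attr.startswith("enter_"):
                handlers[attr[6:]] = val   # strip the "enter_" prefix
        attrs["_handlers"] = handlers
        return super().__new__(cls, name, bases, attrs)

class Visitor(metaclass=HandlerMeta):
    def enter_document(self, node):
        return "entered document"

assert "document" in Visitor._handlers
assert Visitor._handlers["document"](Visitor(), None) == "entered document"
```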
python | keon__algorithms | tests/test_dp.py | {
"start": 4544,
"end": 4797
} | class ____(unittest.TestCase):
def test_longest_increasing_subsequence_optimized(self):
sequence = [1, 101, 10, 2, 3, 100, 4, 6, 2]
self.assertEqual(5, longest_increasing_subsequence(sequence))
| TestLongestIncreasingSubsequenceOptimized |
python | apache__airflow | providers/google/tests/unit/google/cloud/sensors/test_gcs.py | {
"start": 10904,
"end": 14827
} | class ____:
@mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSHook")
def test_should_pass_arguments_to_hook(self, mock_hook):
task = GCSObjectsWithPrefixExistenceSensor(
task_id="task-id",
bucket=TEST_BUCKET,
prefix=TEST_PREFIX,
google_cloud_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.list.return_value = ["NOT_EMPTY_LIST"]
result = task.poke(mock.MagicMock)
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.list.assert_called_once_with(TEST_BUCKET, prefix=TEST_PREFIX)
assert result is True
@mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSHook")
def test_should_return_false_on_empty_list(self, mock_hook):
task = GCSObjectsWithPrefixExistenceSensor(
task_id="task-id",
bucket=TEST_BUCKET,
prefix=TEST_PREFIX,
google_cloud_conn_id=TEST_GCP_CONN_ID,
)
mock_hook.return_value.list.return_value = []
result = task.poke(mock.MagicMock)
assert result is False
@mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSHook")
def test_execute(self, mock_hook):
task = GCSObjectsWithPrefixExistenceSensor(
task_id="task-id",
bucket=TEST_BUCKET,
prefix=TEST_PREFIX,
google_cloud_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
poke_interval=0,
)
generated_messages = [f"test-prefix/obj{i}" for i in range(5)]
mock_hook.return_value.list.return_value = generated_messages
response = task.execute(None)
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.list.assert_called_once_with(TEST_BUCKET, prefix=TEST_PREFIX)
assert response == generated_messages
@mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSHook")
def test_execute_timeout(self, mock_hook):
task = GCSObjectsWithPrefixExistenceSensor(
task_id="task-id",
bucket=TEST_BUCKET,
prefix=TEST_PREFIX,
poke_interval=0,
timeout=1,
)
mock_hook.return_value.list.return_value = []
with pytest.raises(AirflowException):
task.execute(mock.MagicMock)
@mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSHook")
@mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSObjectsWithPrefixExistenceSensor.defer")
def test_gcs_object_prefix_existence_sensor_finish_before_deferred(self, mock_defer, mock_hook):
task = GCSObjectsWithPrefixExistenceSensor(
task_id="task-id",
bucket=TEST_BUCKET,
prefix=TEST_PREFIX,
google_cloud_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
deferrable=True,
)
mock_hook.return_value.list.return_value = True
task.execute(mock.MagicMock())
assert not mock_defer.called
@mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSHook")
def test_xcom_value_when_poke_success(self, mock_hook):
mock_hook.return_value.list.return_value = ["test.txt"]
task = GCSObjectsWithPrefixExistenceSensor(
task_id="task-id",
bucket=TEST_BUCKET,
prefix=TEST_PREFIX,
google_cloud_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
deferrable=True,
)
responses = task.execute(None)
assert responses == ["test.txt"]
| TestGoogleCloudStoragePrefixSensor |
python | getsentry__sentry | src/sentry/workflow_engine/types.py | {
"start": 9647,
"end": 11279
} | class ____(ABC, Generic[T]):
@staticmethod
@abstractmethod
def bulk_get_query_object(data_sources) -> dict[int, T | None]:
"""
Bulk fetch related data-source models returning a dict of the
`DataSource.id -> T`.
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def related_model(instance) -> list[ModelRelation]:
"""
A list of deletion ModelRelations. The model relation query should map
the source_id field within the related model to the
`instance.source_id`.
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def get_instance_limit(org: Organization) -> int | None:
"""
Returns the maximum number of instances of this data source type for the organization.
If None, there is no limit.
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def get_current_instance_count(org: Organization) -> int:
"""
Returns the current number of instances of this data source type for the organization.
Only called if `get_instance_limit` returns a number >0
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def get_relocation_model_name() -> str:
"""
Returns the normalized model name (e.g., "sentry.querysubscription") for the model that
source_id references. This is used during backup/relocation to map old PKs to new PKs.
The format is "app_label.model_name" in lowercase.
"""
raise NotImplementedError
| DataSourceTypeHandler |
python | langchain-ai__langchain | libs/langchain_v1/tests/integration_tests/cache/fake_embeddings.py | {
"start": 150,
"end": 1007
} | class ____(Embeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Return simple embeddings.
Embeddings encode each text as its index.
"""
return [[1.0] * 9 + [float(i)] for i in range(len(texts))]
async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
return self.embed_documents(texts)
def embed_query(self, text: str) -> list[float]:
"""Return constant query embeddings.
Embeddings are identical to embed_documents(texts)[0].
Distance to each text will be that text's index,
as it was passed to embed_documents.
"""
return [1.0] * 9 + [0.0]
async def aembed_query(self, text: str) -> list[float]:
return self.embed_query(text)
| FakeEmbeddings |
python | sympy__sympy | sympy/polys/polyerrors.py | {
"start": 2364,
"end": 2421
} | class ____(BasePolynomialError):
pass
| HeuristicGCDFailed |
python | google__pytype | pytype/pyc/opcodes.py | {
"start": 16212,
"end": 16304
} | class ____(OpcodeWithArg): # Arg: Flags
_FLAGS = HAS_ARGUMENT
__slots__ = ()
| FORMAT_VALUE |
python | django__django | django/contrib/admin/migrations/0001_initial.py | {
"start": 111,
"end": 2507
} | class ____(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("contenttypes", "__first__"),
]
operations = [
migrations.CreateModel(
name="LogEntry",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"action_time",
models.DateTimeField(auto_now=True, verbose_name="action time"),
),
(
"object_id",
models.TextField(null=True, verbose_name="object id", blank=True),
),
(
"object_repr",
models.CharField(max_length=200, verbose_name="object repr"),
),
(
"action_flag",
models.PositiveSmallIntegerField(verbose_name="action flag"),
),
(
"change_message",
models.TextField(verbose_name="change message", blank=True),
),
(
"content_type",
models.ForeignKey(
on_delete=models.SET_NULL,
blank=True,
null=True,
to="contenttypes.ContentType",
verbose_name="content type",
),
),
(
"user",
models.ForeignKey(
to=settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
verbose_name="user",
),
),
],
options={
"ordering": ["-action_time"],
"db_table": "django_admin_log",
"verbose_name": "log entry",
"verbose_name_plural": "log entries",
},
bases=(models.Model,),
managers=[
("objects", django.contrib.admin.models.LogEntryManager()),
],
),
]
| Migration |
python | pytorch__pytorch | test/dynamo/test_higher_order_ops.py | {
"start": 142853,
"end": 146774
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[4, 3]", L_y_: "f32[3, 4]"):
l_x_ = L_x_
l_y_ = L_y_
_saved_tensors_hooks_disable = torch._C._autograd._saved_tensors_hooks_disable("torch.func.{grad, vjp, jacrev, hessian} don't yet support saved tensor hooks. Please open an issue with your use case."); _saved_tensors_hooks_disable = None
_grad_increment_nesting = torch._C._functorch._grad_increment_nesting(); _grad_increment_nesting = None
_wrap_for_grad: "f32[4, 3]" = torch._C._functorch._wrap_for_grad(l_x_, 1); l_x_ = _wrap_for_grad = None
diff_primals: "f32[3, 4]" = torch._C._functorch._wrap_for_grad(l_y_, 1); l_y_ = None
set_inplace_requires_grad_allowed = torch._C._functorch.set_inplace_requires_grad_allowed(True); set_inplace_requires_grad_allowed = None
_set_tensor_requires_grad: "f32[3, 4]" = torch._functorch.eager_transforms._set_tensor_requires_grad(diff_primals); _set_tensor_requires_grad = None
set_inplace_requires_grad_allowed_1 = torch._C._functorch.set_inplace_requires_grad_allowed(False); set_inplace_requires_grad_allowed_1 = None
primals_out: "f32[3, 4]" = diff_primals.sin()
results: "f32[3, 4]" = torch._C._functorch._unwrap_for_grad(primals_out, 1)
_grad_decrement_nesting = torch._C._functorch._grad_decrement_nesting(); _grad_decrement_nesting = None
_saved_tensors_hooks_enable = torch._C._autograd._saved_tensors_hooks_enable(); _saved_tensors_hooks_enable = None
tensor: "i64[1]" = torch.tensor((12,))
cumsum: "i64[1]" = tensor.cumsum(dim = 0); tensor = None
getitem: "i64[0]" = cumsum[slice(None, -1, None)]; cumsum = None
neg: "i64[0]" = getitem.neg(); getitem = None
unbind = neg.unbind(); neg = unbind = None
chunk: "f32[12, 12]" = results.new_zeros(12, 12); results = None
diagonal: "f32[12]" = chunk.diagonal(0)
fill_: "f32[12]" = diagonal.fill_(1); diagonal = fill_ = None
basis: "f32[12, 3, 4]" = chunk.view(12, 3, 4); chunk = None
lazy_load_decompositions = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions = None
_vmap_increment_nesting = torch._functorch.predispatch._vmap_increment_nesting(12, 'error'); _vmap_increment_nesting = None
_add_batch_dim: "f32[3, 4]" = torch._functorch.predispatch._add_batch_dim(basis, 0, 1); basis = None
_autograd_grad = torch._functorch.eager_transforms._autograd_grad([primals_out], [diff_primals], [_add_batch_dim], retain_graph = True, create_graph = True); primals_out = diff_primals = _add_batch_dim = None
batched_outputs: "f32[3, 4]" = _autograd_grad[0]; _autograd_grad = None
chunked_result: "f32[12, 3, 4]" = torch._functorch.predispatch._remove_batch_dim(batched_outputs, 1, 12, 0); batched_outputs = None
_vmap_decrement_nesting = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting = None
split = chunked_result.split((12,), dim = 0); chunked_result = None
split_1: "f32[12, 3, 4]" = split[0]; split = None
output_input: "f32[3, 4, 3, 4]" = split_1.view((3, 4, 3, 4)); split_1 = None
return (output_input,)
""",
)
def test_jacrev_has_aux(self):
counters.clear()
def fn(x, y):
return y.sin(), x
def wrapper_fn(x, y):
return torch.func.jacrev(fn, argnums=1, has_aux=True)(x, y)
x = torch.randn(4, 3)
y = torch.randn(3, 4)
wrapped_gm = self._compile_check(wrapper_fn, (x, y))
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
self.assertExpectedInline(
actual,
"""\
| GraphModule |
python | jamielennox__requests-mock | requests_mock/mocker.py | {
"start": 9163,
"end": 11439
} | class ____(MockerCore):
"""The standard entry point for mock Adapter loading.
"""
    #: Prefix that a method name must begin with for the method to be patched
TEST_PREFIX = 'test'
def __init__(self, **kwargs):
"""Create a new mocker adapter.
:param str kw: Pass the mock object through to the decorated function
as this named keyword argument, rather than a positional argument.
:param bool real_http: True to send the request to the real requested
uri if there is not a mock installed for it. Defaults to False.
"""
self._kw = kwargs.pop('kw', None)
super(Mocker, self).__init__(**kwargs)
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, traceback):
self.stop()
def __call__(self, obj):
if isinstance(obj, type):
return self.decorate_class(obj)
return self.decorate_callable(obj)
def copy(self):
"""Returns an exact copy of current mock
"""
m = type(self)(
kw=self._kw,
real_http=self.real_http,
case_sensitive=self.case_sensitive
)
return m
def decorate_callable(self, func):
"""Decorates a callable
:param callable func: callable to decorate
"""
@functools.wraps(func)
def inner(*args, **kwargs):
with self.copy() as m:
if self._kw:
kwargs[self._kw] = m
else:
args = list(args)
args.append(m)
return func(*args, **kwargs)
return inner
def decorate_class(self, klass):
"""Decorates methods in a class with request_mock
Method will be decorated only if it name begins with `TEST_PREFIX`
:param object klass: class which methods will be decorated
"""
for attr_name in dir(klass):
if not attr_name.startswith(self.TEST_PREFIX):
continue
attr = getattr(klass, attr_name)
if not hasattr(attr, '__call__'):
continue
m = self.copy()
setattr(klass, attr_name, m(attr))
return klass
mock = Mocker
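# A minimal usage sketch; the URL is a hypothetical placeholder and the
# `requests` package must be importable. Used as a decorator, the mocker
# instance is passed to the wrapped function as the last positional argument.
@Mocker()
def _example_request(m):
    import requests

    m.get('http://test.example', text='ok')
    return requests.get('http://test.example').text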
| Mocker |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 1606,
"end": 2828
} | class ____(GeneratedAirbyteSource):
@public
def __init__(
self,
name: str,
app_id: str,
api_token: str,
start_date: str,
timezone: Optional[str] = None,
):
"""Airbyte Source for Appsflyer.
Args:
            name (str): The name of the source.
app_id (str): App identifier as found in AppsFlyer.
api_token (str): Pull API token for authentication. If you change the account admin, the token changes, and you must update scripts with the new token. Get the API token in the Dashboard.
start_date (str): The default value to use if no bookmark exists for an endpoint. Raw Reports historical lookback is limited to 90 days.
timezone (Optional[str]): Time zone in which date times are stored. The project timezone may be found in the App settings in the AppsFlyer console.
"""
self.app_id = check.str_param(app_id, "app_id")
self.api_token = check.str_param(api_token, "api_token")
self.start_date = check.str_param(start_date, "start_date")
self.timezone = check.opt_str_param(timezone, "timezone")
super().__init__("Appsflyer", name)
| AppsflyerSource |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_contextlib.py | {
"start": 17833,
"end": 22458
} | class ____(__TestCase):
@support.requires_docstrings
def test_instance_docs(self):
# Issue 19330: ensure context manager instances have good docstrings
cm_docstring = mycontext.__doc__
obj = mycontext()
self.assertEqual(obj.__doc__, cm_docstring)
def test_contextdecorator(self):
context = mycontext()
with context as result:
self.assertIs(result, context)
self.assertTrue(context.started)
self.assertEqual(context.exc, (None, None, None))
def test_contextdecorator_with_exception(self):
context = mycontext()
with self.assertRaisesRegex(NameError, 'foo'):
with context:
raise NameError('foo')
self.assertIsNotNone(context.exc)
self.assertIs(context.exc[0], NameError)
context = mycontext()
context.catch = True
with context:
raise NameError('foo')
self.assertIsNotNone(context.exc)
self.assertIs(context.exc[0], NameError)
def test_decorator(self):
context = mycontext()
@context
def test():
self.assertIsNone(context.exc)
self.assertTrue(context.started)
test()
self.assertEqual(context.exc, (None, None, None))
def test_decorator_with_exception(self):
context = mycontext()
@context
def test():
self.assertIsNone(context.exc)
self.assertTrue(context.started)
raise NameError('foo')
with self.assertRaisesRegex(NameError, 'foo'):
test()
self.assertIsNotNone(context.exc)
self.assertIs(context.exc[0], NameError)
def test_decorating_method(self):
context = mycontext()
with torch._dynamo.error_on_graph_break(False):
class Test(object):
@context
def method(self, a, b, c=None):
self.a = a
self.b = b
self.c = c
# these tests are for argument passing when used as a decorator
test = Test()
test.method(1, 2)
self.assertEqual(test.a, 1)
self.assertEqual(test.b, 2)
self.assertEqual(test.c, None)
test = Test()
test.method('a', 'b', 'c')
self.assertEqual(test.a, 'a')
self.assertEqual(test.b, 'b')
self.assertEqual(test.c, 'c')
test = Test()
test.method(a=1, b=2)
self.assertEqual(test.a, 1)
self.assertEqual(test.b, 2)
def test_typo_enter(self):
with torch._dynamo.error_on_graph_break(False):
class mycontext(ContextDecorator):
def __unter__(self):
pass
def __exit__(self, *exc):
pass
with self.assertRaisesRegex(TypeError, 'the context manager'):
with mycontext():
pass
def test_typo_exit(self):
with torch._dynamo.error_on_graph_break(False):
class mycontext(ContextDecorator):
def __enter__(self):
pass
def __uxit__(self, *exc):
pass
with self.assertRaisesRegex(TypeError, 'the context manager.*__exit__'):
with mycontext():
pass
def test_contextdecorator_as_mixin(self):
with torch._dynamo.error_on_graph_break(False):
class somecontext(object):
started = False
exc = None
def __enter__(self):
self.started = True
return self
def __exit__(self, *exc):
self.exc = exc
class mycontext(somecontext, ContextDecorator):
pass
context = mycontext()
@context
def test():
self.assertIsNone(context.exc)
self.assertTrue(context.started)
test()
self.assertEqual(context.exc, (None, None, None))
def test_contextmanager_as_decorator(self):
@contextmanager
def woohoo(y):
state.append(y)
yield
state.append(999)
state = []
@woohoo(1)
def test(x):
self.assertEqual(state, [1])
state.append(x)
test('something')
self.assertEqual(state, [1, 'something', 999])
# Issue #11647: Ensure the decorated function is 'reusable'
state = []
test('something else')
self.assertEqual(state, [1, 'something else', 999])
| TestContextDecorator |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 114252,
"end": 114593
} | class ____(BaseModel):
"""
Pair of points (a, b) with score
"""
a: "ExtendedPointId" = Field(..., description="Pair of points (a, b) with score")
b: "ExtendedPointId" = Field(..., description="Pair of points (a, b) with score")
score: float = Field(..., description="Pair of points (a, b) with score")
| SearchMatrixPair |
python | tensorflow__tensorflow | tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_algorithm.py | {
"start": 13537,
"end": 15090
} | class ____(_HistogramCalibrationAlgorithmBase):
"""HistogramMseSymmetric for calculating min and max values of calibration result."""
def get_min_max_value(self) -> tuple[float, float]:
"""Finds min and max starting from the center index.
The HistogramMseSymmetric method starts from the center bin and expands the
range to both sides. This works better when the data is well-centered.
Returns:
(min_value, max_value): Min and max calculated using the method starting
from center and expanding.
"""
# This function is currently only called in this method, but will be used in
# other methods in the future.
return self._get_min_max_value_by_expanding_range(self._num_bins // 2)
def get_min_max_value(
statistics: calib_stats_pb2.CalibrationStatistics,
calib_opts: stablehlo_quant_config_pb2.CalibrationOptions,
) -> tuple[float, float]:
"""Calculates min and max from statistics using calibration options.
Args:
statistics: Collected calibration statistics.
calib_opts: Calibration options used for calculating min and max.
Returns:
(min_value, max_value): Min and max calculated using calib_opts.
Raises:
ValueError: Unsupported calibration method is given.
"""
calib_method = calib_opts.calibration_method
if calib_method not in _REGISTRY:
raise ValueError(f'Unsupported calibration method: {calib_method}')
calibration_algorithm = _REGISTRY[calib_method](statistics, calib_opts)
return calibration_algorithm.get_min_max_value()
| _HistogramMseSymmetric |
python | pytorch__pytorch | test/test_fx_passes.py | {
"start": 6828,
"end": 14113
} | class ____(JitTestCase):
@parametrize("fn, expected_partition, bookend_non_compute_pass", [
(TestPartitionFunctions.forward1, [["add_7", "add_6"], ["add_5", "add_4", "add_3"], ["add_2", "add_1", "add"]], False),
(TestPartitionFunctions.forward2, [["add_3", "add_2"], ["add_1", "add"]], False),
# 1 horizontal fusion with common producer
(TestPartitionFunctions.forward3, [["add_2", "add_1", "add"]], False),
(TestPartitionFunctions.forward4, [["add_2", "add_1", "add"]], False),
# 2 branches cases
(TestPartitionFunctions.forward5, [["add_1", "add"]], False),
(TestPartitionFunctions.forward6, [["add"]], False),
(TestPartitionFunctions.forward7, [["add_3", "add_2", "add", "add_1"]], False),
(TestPartitionFunctions.forward8, [["add_3", "add_2", "add", "add_1"]], False),
# 3 branch cases
(TestPartitionFunctions.forward9, [['add_3', 'add_2', 'add_1', 'add']], False),
(TestPartitionFunctions.forward10, [['add_3', 'add_2', 'add', 'add_1']], False),
(TestPartitionFunctions.forward11, [['add_1'], ['add']], False),
# 4 not necessarily the only partition, just to verify that there's no cyclic dependency after partition
(TestPartitionFunctions.forward12, [["add_2", "add_3", "add_4"], ["add", "add_1"]], False),
# 5 getitem special case
(TestPartitionFunctions.forward13, [["add_2", "add_1", "add"]], False),
(TestPartitionFunctions.forward14, [["add", "std_mean", "getitem", "getitem_1"]], False),
# 6 bookend non_compute pass
(TestPartitionFunctions.forward15, [["permute_1", "add_1", "add"]], True),
(TestPartitionFunctions.forward15, [['add_1', 'add', 'permute_1', 'view', 'permute_2', 'permute_3', 'permute']], False),
(TestPartitionFunctions.forward16, [["permute_1", "add_1", "add"]], True),
(TestPartitionFunctions.forward16, [['add_1', 'add', 'permute_1', 'view', 'permute_2', 'permute_3', 'permute']], False),
# should be empty partition, not a partition with empty nodes
(TestPartitionFunctions.forward18, [], False),
])
def test_partitioner(self, fn, expected_partition, bookend_non_compute_pass):
traced = symbolic_trace(fn)
non_compute_ops = []
if bookend_non_compute_pass:
non_compute_ops = ["torch.ops.aten.view", "torch.ops.aten.permute"]
supported_ops = MockOperatorSupport()
partitioner = CapabilityBasedPartitioner(traced,
supported_ops,
allows_single_node_partition=True,
non_compute_ops=non_compute_ops)
partitions = partitioner.propose_partitions()
if bookend_non_compute_pass:
partitioner.remove_bookend_non_compute_ops(partitions)
partitions_name = [[node.name for node in partition.nodes] for partition in partitions]
assert len(partitions_name) == len(expected_partition)
for i in range(len(partitions_name)):
assert set(partitions_name[i]) == set(expected_partition[i])
fused_graph = partitioner.fuse_partitions(partitions)
a, b, c = torch.rand(4), torch.rand(4), torch.rand(4)
expected = fn(a, b, c)
result = fused_graph(a, b, c)
torch.testing.assert_close(expected, result)
@parametrize("fn, expected_partition", [
(TestPartitionFunctions.forward17, [['add', 'add_1', 'add_2']]),
])
def test_partitioner_independent_output(self, fn, expected_partition):
traced = symbolic_trace(fn)
supported_ops = MockOperatorSupport()
partitioner = CapabilityBasedPartitioner(traced,
supported_ops,
allows_single_node_partition=True)
partitions = partitioner.propose_partitions()
partitions_name = [[node.name for node in partition.nodes] for partition in partitions]
assert len(partitions_name) == len(expected_partition)
for i in range(len(partitions_name)):
assert set(partitions_name[i]) == set(expected_partition[i])
fused_graph = partitioner.fuse_partitions(partitions)
a, b, c, d, e, f = torch.rand(4), torch.rand(4), torch.rand(4), torch.rand(4), torch.rand(4), torch.rand(4)
expected = fn(a, b, c, d, e, f)
result = fused_graph(a, b, c, d, e, f)
torch.testing.assert_close(expected, result)
@parametrize("partition", [
[['add', 'add_1'], ['add_5', 'add_6']],
[['add', 'add_1', 'add_2']], # vertical fusion
[['add_2', 'add_3']], # horizontal fusion
[['add_3', 'add_4']],
[['add_6', 'add_5']], # arbitrary node order
[['add_4', 'add_1', 'add_3', 'add_2']], # arbitrary node order
[['add_5', 'add_6'], ['add_1', 'add_2', 'add_3', 'add_4']], # arbitrary partition order
[['add_5', 'linear2']], # includes call_function + call_module node
[['add_6', 'relu']], # includes call_function + call_module node
[['param', 'add_2']], # includes get_attr + call_module nodes
[['param', 'add_1', 'linear']], # includes get_attr + call_function + call_module nodes
[["add", "linear", "add_1", "param", "add_2", "add_3", "add_4", "linear2", "add_5", "add_6", "relu"]], # full graph
])
def test_fuser_util(self, partition):
m = TestModule()
gm = symbolic_trace(m)
nodes_by_name = {node.name : node for node in gm.graph.nodes}
partitions = []
for node_names in partition:
partitions.append(dict.fromkeys([nodes_by_name[name] for name in node_names]))
fused_graph = fuse_by_partitions(gm, partitions)
a, b, c = torch.rand(4), torch.rand(4), torch.rand(4)
expected = m(a, b, c)
result = fused_graph(a, b, c)
torch.testing.assert_close(expected, result)
@parametrize("partition", [
[['add', 'add_1'], ['add_1', 'add_5', 'add_6']], # add_1 exists in multiple partitions
[['add', 'add_1', 'add_3']], # invalid partition: circular dependency
[['add_4', 'add_5']], # invalid partition: circular dependency
[['relu', 'add_5']], # invalid partition: circular dependency
])
def test_fuser_util_xfail(self, partition):
m = TestModule()
gm = symbolic_trace(m)
nodes_by_name = {node.name : node for node in gm.graph.nodes}
partitions = []
for node_names in partition:
partitions.append(dict.fromkeys([nodes_by_name[name] for name in node_names]))
with self.assertRaises(Exception):
fuse_by_partitions(gm, partitions)
def test_fuser_pass_deep_model(self):
m = TestDeepModule()
traced = symbolic_trace(m)
supported_ops = MockOperatorSupport()
partitioner = CapabilityBasedPartitioner(traced,
supported_ops,
allows_single_node_partition=True)
partitions = partitioner.propose_partitions()
@dataclass
| TestFXGraphPasses |
python | sympy__sympy | sympy/logic/algorithms/lra_theory.py | {
"start": 31052,
"end": 31770
} | class ____():
"""
Object to keep track of upper and lower bounds
on `self.var`.
"""
def __init__(self, var):
self.upper = LRARational(float("inf"), 0)
self.upper_from_eq = False
self.upper_from_neg = False
self.lower = LRARational(-float("inf"), 0)
self.lower_from_eq = False
self.lower_from_neg = False
self.assign = LRARational(0,0)
self.var = var
self.col_idx = None
def __repr__(self):
return repr(self.var)
def __eq__(self, other):
if not isinstance(other, LRAVariable):
return False
return other.var == self.var
def __hash__(self):
return hash(self.var)
| LRAVariable |
python | pytorch__pytorch | torch/distributions/constraints.py | {
"start": 15205,
"end": 15504
} | class ____(Constraint):
"""
Constrain to the unit simplex in the innermost (rightmost) dimension.
Specifically: `x >= 0` and `x.sum(-1) == 1`.
"""
event_dim = 1
def check(self, value):
return torch.all(value >= 0, dim=-1) & ((value.sum(-1) - 1).abs() < 1e-6)
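# A minimal usage sketch: torch exposes a singleton of this class as
# `constraints.simplex`, and `check` is vectorized over leading batch dims.
_example = _Simplex().check(torch.tensor([0.2, 0.3, 0.5]))  # tensor(True)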
| _Simplex |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_theme05.py | {
"start": 350,
"end": 2146
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_theme05.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with chart formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line", "subtype": "stacked"})
chart.axis_ids = [68411392, 68414848]
# Add some test data for the chart(s).
for row_num in range(8):
for col_num in range(6):
worksheet.write_number(row_num, col_num, 1)
chart.add_series(
{
"values": ["Sheet1", 0, 0, 7, 0],
"line": {"color": Color((3, 0))},
}
)
chart.add_series(
{
"values": ["Sheet1", 0, 1, 7, 1],
"line": {"color": Color((3, 1))},
}
)
chart.add_series(
{
"values": ["Sheet1", 0, 2, 7, 2],
"line": {"color": Color((3, 2))},
}
)
chart.add_series(
{
"values": ["Sheet1", 0, 3, 7, 3],
"line": {"color": Color((3, 3))},
}
)
chart.add_series(
{
"values": ["Sheet1", 0, 4, 7, 4],
"line": {"color": Color((3, 4))},
}
)
chart.add_series(
{
"values": ["Sheet1", 0, 5, 7, 5],
"line": {"color": Color((3, 5))},
}
)
worksheet.insert_chart(8, 7, chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | openai__openai-python | src/openai/lib/streaming/responses/_responses.py | {
"start": 3166,
"end": 4303
} | class ____(Generic[TextFormatT]):
def __init__(
self,
api_request: Callable[[], Stream[RawResponseStreamEvent]],
*,
text_format: type[TextFormatT] | Omit,
input_tools: Iterable[ToolParam] | Omit,
starting_after: int | None,
) -> None:
self.__stream: ResponseStream[TextFormatT] | None = None
self.__api_request = api_request
self.__text_format = text_format
self.__input_tools = input_tools
self.__starting_after = starting_after
def __enter__(self) -> ResponseStream[TextFormatT]:
raw_stream = self.__api_request()
self.__stream = ResponseStream(
raw_stream=raw_stream,
text_format=self.__text_format,
input_tools=self.__input_tools,
starting_after=self.__starting_after,
)
return self.__stream
def __exit__(
self,
exc_type: type[BaseException] | None,
exc: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
if self.__stream is not None:
self.__stream.close()
| ResponseStreamManager |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_simplify/SIM910.py | {
"start": 1026,
"end": 1431
} | class ____:
def __init__(self):
self.name = "Tom"
data = Data()
ages = {"Tom": 23, "Maria": 23, "Dog": 11}
age = ages.get(data.name, None)
# Complex expression as key
ages = {"Tom": 23, "Maria": 23, "Dog": 11}
age = ages.get("Tom" if True else "Maria", None)
# Function call as key
def get_key():
return "Tom"
ages = {"Tom": 23, "Maria": 23, "Dog": 11}
age = ages.get(get_key(), None)
| Data |
python | dagster-io__dagster | python_modules/dagster-pipes/dagster_pipes/__init__.py | {
"start": 2278,
"end": 2907
} | class ____(TypedDict):
"""The serializable data passed from the orchestration process to the external process. This gets
wrapped in a :py:class:`PipesContext`.
"""
asset_keys: Optional[Sequence[str]]
code_version_by_asset_key: Optional[Mapping[str, Optional[str]]]
provenance_by_asset_key: Optional[Mapping[str, Optional["PipesDataProvenance"]]]
partition_key: Optional[str]
partition_key_range: Optional["PipesPartitionKeyRange"]
partition_time_window: Optional["PipesTimeWindow"]
run_id: str
job_name: Optional[str]
retry_number: int
extras: Mapping[str, Any]
| PipesContextData |
python | pydantic__pydantic | pydantic/deprecated/config.py | {
"start": 1876,
"end": 2508
} | class ____(type):
def __getattribute__(self, __name: str) -> Any:
# The @deprecated decorator accesses other attributes, so we only emit a warning for the expected ones
if __name in {'allow', 'ignore', 'forbid'}:
warnings.warn(
"`pydantic.config.Extra` is deprecated, use literal values instead (e.g. `extra='allow'`)",
DeprecationWarning,
stacklevel=2,
)
return super().__getattribute__(__name)
@deprecated(
"Extra is deprecated. Use literal values instead (e.g. `extra='allow'`)", category=PydanticDeprecatedSince20
)
| _ExtraMeta |
python | pyca__cryptography | tests/doubles.py | {
"start": 475,
"end": 621
} | class ____(CipherAlgorithm):
name = "dummy-cipher"
block_size = 128
key_size = 256
key_sizes = frozenset([256])
| DummyCipherAlgorithm |
python | kamyu104__LeetCode-Solutions | Python/move-zeroes.py | {
"start": 29,
"end": 587
} | class ____(object):
def moveZeroes(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
pos = 0
for i in xrange(len(nums)):
if nums[i]:
nums[i], nums[pos] = nums[pos], nums[i]
pos += 1
def moveZeroes2(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
nums.sort(cmp=lambda a, b: 0 if b else -1)
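if __name__ == "__main__":
    # Worked example of the two-pointer pass in moveZeroes: non-zero elements
    # are swapped forward in order, zeros accumulate at the tail.
    nums = [0, 1, 0, 3, 12]
    Solution().moveZeroes(nums)
    assert nums == [1, 3, 12, 0, 0]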
| Solution |
python | plotly__plotly.py | plotly/graph_objs/scattergeo/_line.py | {
"start": 233,
"end": 4133
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattergeo"
_path_str = "scattergeo.line"
_valid_props = {"color", "dash", "width"}
@property
def color(self):
"""
Sets the line color.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def dash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'dash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["dash"]
@dash.setter
def dash(self, val):
self["dash"] = val
@property
def width(self):
"""
Sets the line width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
width
Sets the line width (in px).
"""
def __init__(self, arg=None, color=None, dash=None, width=None, **kwargs):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattergeo.Line`
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
width
Sets the line width (in px).
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattergeo.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergeo.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("dash", arg, dash)
self._set_property("width", arg, width)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Line |
python | getsentry__sentry | src/sentry/digests/types.py | {
"start": 977,
"end": 1401
} | class ____(NamedTuple):
key: str
value: Notification
timestamp: float
@property
def datetime(self) -> datetime_mod.datetime:
return to_datetime(self.timestamp)
def with_rules(self, rules: list[Rule]) -> RecordWithRuleObjects:
return RecordWithRuleObjects(
key=self.key,
value=self.value.with_rules(rules),
timestamp=self.timestamp,
)
| Record |
python | django-haystack__django-haystack | test_haystack/solr_tests/test_solr_backend.py | {
"start": 28859,
"end": 30069
} | class ____(TestCase):
def test_all_cases(self, mock_send_request, mock_log):
self.sample_objs = []
for i in range(1, 4):
mock = MockModel()
mock.id = i
mock.author = "daniel%s" % i
mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)
self.sample_objs.append(mock)
# Setup the rest of the bits.
ui = UnifiedIndex()
smmi = SolrMockSearchIndex()
ui.build(indexes=[smmi])
connections["solr"]._index = ui
sb = connections["solr"].get_backend()
# Prior to the addition of the try/except bits, these would all fail miserably.
sb.update(smmi, self.sample_objs)
self.assertEqual(mock_log.call_count, 1)
sb.remove(self.sample_objs[0])
self.assertEqual(mock_log.call_count, 2)
sb.search("search")
self.assertEqual(mock_log.call_count, 3)
sb.more_like_this(self.sample_objs[0])
self.assertEqual(mock_log.call_count, 4)
sb.clear([MockModel])
self.assertEqual(mock_log.call_count, 5)
sb.clear()
self.assertEqual(mock_log.call_count, 6)
| FailedSolrSearchBackendTestCase |
python | fluentpython__example-code | attic/sequences/table.py | {
"start": 2638,
"end": 4572
} | class ____(collections.UserList):
"""A table with rows, all of the same width"""
def __init__(self, rows):
super().__init__(Row(r) for r in rows)
if len(self) < 1:
raise ValueError('Table must have at least one row.')
self.width = self.check_width()
def check_width(self):
row_widths = {len(row) for row in self.data}
if len(row_widths) > 1:
raise ValueError('All rows must have equal length.')
return row_widths.pop()
@classmethod
def blank(class_, rows, columns, filler=None):
return class_([[filler] * columns for i in range(rows)])
def __repr__(self):
prefix = '%s(' % self.__class__.__name__
indent = ' ' * len(prefix)
rows = (',\n' + indent).join(
repr(row) for row in self.data)
return prefix + rows + ')'
def _get_indexes(self, position):
if isinstance(position, tuple): # multiple indexes
if len(position) == 2: # two indexes: t[i, j]
return position
else:
raise IndexError('index must be [i] or [i, j]')
else: # one index: t[i]
return position, None
def __getitem__(self, position):
i, j = self._get_indexes(position)
if isinstance(i, slice):
if j is None: # build sub-table w/ full rows
return Table(self.data[position])
else: # build sub-table w/ sub-rows
return Table(cells[j] for cells in self.data[i])
else: # i is number
try:
row = self.data[i]
except IndexError:
msg = 'no row at index %r of %d-row table'
raise IndexError(msg % (position, len(self)))
if j is None: # return row at table[i]
return row
else:
return row[j] # return row[j] or row[a:b]
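if __name__ == '__main__':
    # A short sketch of the two-axis indexing defined above, assuming the Row
    # helper defined earlier in this module.
    t = Table([[1, 2, 3], [4, 5, 6]])
    assert t[0, 1] == 2                  # row 0, column 1
    assert t[:, 1:].width == 2           # 2x2 sub-table
    assert Table.blank(2, 3).width == 3  # 2x3 table of None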
| Table |
python | huggingface__transformers | src/transformers/models/flex_olmo/modular_flex_olmo.py | {
"start": 13311,
"end": 15827
} | class ____(MixtralModel):
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> MoeModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
# create position embeddings to be shared across the decoder layers
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
position_embeddings=position_embeddings,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
| FlexOlmoModel |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/comments.py | {
"start": 7275,
"end": 8560
} | class ____:
"""
    line and column information w.r.t. the document; values start at zero (0)
"""
attrib = line_col_attrib
def __init__(self):
# type: () -> None
self.line = None
self.col = None
self.data = None # type: Optional[Dict[Any, Any]]
def add_kv_line_col(self, key, data):
# type: (Any, Any) -> None
if self.data is None:
self.data = {}
self.data[key] = data
def key(self, k):
# type: (Any) -> Any
return self._kv(k, 0, 1)
def value(self, k):
# type: (Any) -> Any
return self._kv(k, 2, 3)
def _kv(self, k, x0, x1):
# type: (Any, Any, Any) -> Any
if self.data is None:
return None
data = self.data[k]
return data[x0], data[x1]
def item(self, idx):
# type: (Any) -> Any
if self.data is None:
return None
return self.data[idx][0], self.data[idx][1]
def add_idx_line_col(self, key, data):
# type: (Any, Any) -> None
if self.data is None:
self.data = {}
self.data[key] = data
def __repr__(self):
# type: () -> str
return _F('LineCol({line}, {col})', line=self.line, col=self.col) # type: ignore
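# A usage sketch: ruamel attaches an instance of this class to round-tripped
# nodes as `.lc`, so callers read e.g. `data.lc.line`, `data.lc.key('k')`
# and `data.lc.value('k')` for zero-based positions.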
| LineCol |
python | getsentry__sentry | src/sentry/integrations/api/endpoints/integration_features.py | {
"start": 734,
"end": 1276
} | class ____(Endpoint):
owner = ApiOwner.INTEGRATIONS
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
permission_classes = (IntegrationFeaturesPermissions,)
def get(self, request: Request, *args: Any, **kwargs: Any) -> Response:
return self.respond(
[
serialize(IntegrationFeature(feature=feature), request.user, has_target=False)
for feature, _ in Feature.as_choices()
],
status=status.HTTP_200_OK,
)
| IntegrationFeaturesEndpoint |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0027_remove_json_with_html_feature.py | {
"start": 533,
"end": 773
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0026_ad-free-option"),
]
operations = [
migrations.RunPython(forward_add_feature, reverse_add_feature),
]
| Migration |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_pretty.py | {
"start": 20734,
"end": 20832
} | class ____:
def _repr_pretty_(self, p, cycle):
p.text("I am a banana")
@dataclass
| Banana |
python | cython__cython | Cython/Compiler/TypeSlots.py | {
"start": 21940,
"end": 23561
} | class ____(SlotDescriptor):
# Descriptor for a substructure of the type object.
#
# sub_slots [SlotDescriptor]
def __init__(self, sub_slots, slot_type, slot_name, substructures, ifdef=None, cast_cname=None):
SlotDescriptor.__init__(self, slot_name, ifdef=ifdef)
self.sub_slots = sub_slots
self.slot_type = slot_type
self.cast_cname = cast_cname
substructures.append(self)
def is_empty(self, scope):
for slot in self.sub_slots:
if slot.slot_code(scope) != "0":
return False
return True
def substructure_cname(self, scope):
return "%s%s_%s" % (Naming.pyrex_prefix, self.slot_name, scope.class_name)
def slot_code(self, scope):
if not self.is_empty(scope):
cast = ""
if self.cast_cname:
cast = f"({self.cast_cname}*)"
return f"{cast}&{self.substructure_cname(scope)}"
return "0"
def generate_substructure(self, scope, code):
if not self.is_empty(scope):
code.putln("")
if self.ifdef:
code.putln("#if %s" % self.ifdef)
code.putln(
"static %s %s = {" % (
self.slot_type,
self.substructure_cname(scope)))
for slot in self.sub_slots:
slot.generate(scope, code)
code.putln("};")
if self.ifdef:
code.putln("#endif")
def generate_spec(self, scope, code):
for slot in self.sub_slots:
slot.generate_spec(scope, code)
| SuiteSlot |
python | django__django | tests/custom_managers/models.py | {
"start": 6518,
"end": 6569
} | class ____(AbstractPerson):
pass
| PersonFromAbstract |
python | cherrypy__cherrypy | cherrypy/lib/cpstats.py | {
"start": 11781,
"end": 16323
} | class ____(cherrypy.Tool):
"""Record various information about the current request."""
def __init__(self):
"""Initialize the statistics gathering tool."""
cherrypy.Tool.__init__(self, 'on_end_request', self.record_stop)
def _setup(self):
"""Plug this tool into ``cherrypy.request``.
The standard CherryPy request object will automatically call
this method when the tool is "turned on" in config.
"""
if appstats.get('Enabled', False):
cherrypy.Tool._setup(self)
self.record_start()
def record_start(self):
"""Record the beginning of a request."""
request = cherrypy.serving.request
if not hasattr(request.rfile, 'bytes_read'):
request.rfile = ByteCountWrapper(request.rfile)
request.body.fp = request.rfile
r = request.remote
appstats['Current Requests'] += 1
appstats['Total Requests'] += 1
appstats['Requests'][_get_threading_ident()] = {
'Bytes Read': None,
'Bytes Written': None,
# Use a lambda so the ip gets updated by tools.proxy later
'Client': lambda s: '%s:%s' % (r.ip, r.port),
'End Time': None,
'Processing Time': proc_time,
'Request-Line': request.request_line,
'Response Status': None,
'Start Time': time.time(),
}
def record_stop(
self,
uriset=None,
slow_queries=1.0,
slow_queries_count=100,
debug=False,
**kwargs,
):
"""Record the end of a request."""
resp = cherrypy.serving.response
w = appstats['Requests'][_get_threading_ident()]
r = cherrypy.request.rfile.bytes_read
w['Bytes Read'] = r
appstats['Total Bytes Read'] += r
if resp.stream:
w['Bytes Written'] = 'chunked'
else:
cl = int(resp.headers.get('Content-Length', 0))
w['Bytes Written'] = cl
appstats['Total Bytes Written'] += cl
w['Response Status'] = getattr(
resp,
'output_status',
resp.status,
).decode()
w['End Time'] = time.time()
p = w['End Time'] - w['Start Time']
w['Processing Time'] = p
appstats['Total Time'] += p
appstats['Current Requests'] -= 1
if debug:
cherrypy.log('Stats recorded: %s' % repr(w), 'TOOLS.CPSTATS')
if uriset:
rs = appstats.setdefault('URI Set Tracking', {})
r = rs.setdefault(
uriset,
{
'Min': None,
'Max': None,
'Count': 0,
'Sum': 0,
'Avg': average_uriset_time,
},
)
if r['Min'] is None or p < r['Min']:
r['Min'] = p
if r['Max'] is None or p > r['Max']:
r['Max'] = p
r['Count'] += 1
r['Sum'] += p
if slow_queries and p > slow_queries:
sq = appstats.setdefault('Slow Queries', [])
sq.append(w.copy())
if len(sq) > slow_queries_count:
sq.pop(0)
cherrypy.tools.cpstats = StatsTool()
# ---------------------- CherryPy Statistics Reporting ---------------------- #
thisdir = os.path.abspath(os.path.dirname(__file__))
missing = object()
def locale_date(v):
"""Format given date per current locale."""
return time.strftime('%c', time.gmtime(v))
def iso_format(v):
"""Format given date as ISO string."""
return time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(v))
def pause_resume(ns):
"""Produce pause or resume HTML form maker."""
def _pause_resume(enabled):
pause_disabled = ''
resume_disabled = ''
if enabled:
resume_disabled = 'disabled="disabled" '
else:
pause_disabled = 'disabled="disabled" '
return """
<form action="pause" method="POST" style="display:inline">
<input type="hidden" name="namespace" value="%s" />
<input type="submit" value="Pause" %s/>
</form>
<form action="resume" method="POST" style="display:inline">
<input type="hidden" name="namespace" value="%s" />
<input type="submit" value="Resume" %s/>
</form>
""" % (ns, pause_disabled, ns, resume_disabled)
return _pause_resume
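# For example, `pause_resume('Requests')(enabled=True)` renders the two forms
# for the 'Requests' namespace with the Resume button disabled.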
| StatsTool |
python | scrapy__scrapy | tests/test_command_shell.py | {
"start": 4889,
"end": 5666
} | class ____:
def test_fetch(self, mockserver: MockServer) -> None:
args = (
sys.executable,
"-m",
"scrapy.cmdline",
"shell",
)
env = os.environ.copy()
env["SCRAPY_PYTHON_SHELL"] = "python"
logfile = BytesIO()
# https://github.com/python/typeshed/issues/14915
p = PopenSpawn(args, env=cast("os._Environ", env), timeout=5)
p.logfile_read = logfile
p.expect_exact("Available Scrapy objects")
p.sendline(f"fetch('{mockserver.url('/')}')")
p.sendline("type(response)")
p.expect_exact("HtmlResponse")
p.sendeof()
p.wait()
logfile.seek(0)
assert "Traceback" not in logfile.read().decode()
| TestInteractiveShell |
python | pypa__pip | src/pip/_vendor/rich/_win32_console.py | {
"start": 10402,
"end": 22755
} | class ____:
"""This class allows interaction with the legacy Windows Console API. It should only be used in the context
of environments where virtual terminal processing is not available. However, if it is used in a Windows environment,
the entire API should work.
Args:
file (IO[str]): The file which the Windows Console API HANDLE is retrieved from, defaults to sys.stdout.
"""
BRIGHT_BIT = 8
# Indices are ANSI color numbers, values are the corresponding Windows Console API color numbers
ANSI_TO_WINDOWS = [
0, # black The Windows colours are defined in wincon.h as follows:
4, # red define FOREGROUND_BLUE 0x0001 -- 0000 0001
2, # green define FOREGROUND_GREEN 0x0002 -- 0000 0010
6, # yellow define FOREGROUND_RED 0x0004 -- 0000 0100
1, # blue define FOREGROUND_INTENSITY 0x0008 -- 0000 1000
5, # magenta define BACKGROUND_BLUE 0x0010 -- 0001 0000
3, # cyan define BACKGROUND_GREEN 0x0020 -- 0010 0000
7, # white define BACKGROUND_RED 0x0040 -- 0100 0000
8, # bright black (grey) define BACKGROUND_INTENSITY 0x0080 -- 1000 0000
12, # bright red
10, # bright green
14, # bright yellow
9, # bright blue
13, # bright magenta
11, # bright cyan
15, # bright white
]
def __init__(self, file: "IO[str]") -> None:
handle = GetStdHandle(STDOUT)
self._handle = handle
default_text = GetConsoleScreenBufferInfo(handle).wAttributes
self._default_text = default_text
self._default_fore = default_text & 7
self._default_back = (default_text >> 4) & 7
self._default_attrs = self._default_fore | (self._default_back << 4)
self._file = file
self.write = file.write
self.flush = file.flush
@property
def cursor_position(self) -> WindowsCoordinates:
"""Returns the current position of the cursor (0-based)
Returns:
WindowsCoordinates: The current cursor position.
"""
coord: COORD = GetConsoleScreenBufferInfo(self._handle).dwCursorPosition
return WindowsCoordinates(row=coord.Y, col=coord.X)
@property
def screen_size(self) -> WindowsCoordinates:
"""Returns the current size of the console screen buffer, in character columns and rows
Returns:
WindowsCoordinates: The width and height of the screen as WindowsCoordinates.
"""
screen_size: COORD = GetConsoleScreenBufferInfo(self._handle).dwSize
return WindowsCoordinates(row=screen_size.Y, col=screen_size.X)
def write_text(self, text: str) -> None:
"""Write text directly to the terminal without any modification of styles
Args:
text (str): The text to write to the console
"""
self.write(text)
self.flush()
def write_styled(self, text: str, style: Style) -> None:
"""Write styled text to the terminal.
Args:
text (str): The text to write
style (Style): The style of the text
"""
color = style.color
bgcolor = style.bgcolor
if style.reverse:
color, bgcolor = bgcolor, color
if color:
fore = color.downgrade(ColorSystem.WINDOWS).number
fore = fore if fore is not None else 7 # Default to ANSI 7: White
if style.bold:
fore = fore | self.BRIGHT_BIT
if style.dim:
fore = fore & ~self.BRIGHT_BIT
fore = self.ANSI_TO_WINDOWS[fore]
else:
fore = self._default_fore
if bgcolor:
back = bgcolor.downgrade(ColorSystem.WINDOWS).number
back = back if back is not None else 0 # Default to ANSI 0: Black
back = self.ANSI_TO_WINDOWS[back]
else:
back = self._default_back
assert fore is not None
assert back is not None
SetConsoleTextAttribute(
self._handle, attributes=ctypes.c_ushort(fore | (back << 4))
)
self.write_text(text)
SetConsoleTextAttribute(self._handle, attributes=self._default_text)
def move_cursor_to(self, new_position: WindowsCoordinates) -> None:
"""Set the position of the cursor
Args:
new_position (WindowsCoordinates): The WindowsCoordinates representing the new position of the cursor.
"""
if new_position.col < 0 or new_position.row < 0:
return
SetConsoleCursorPosition(self._handle, coords=new_position)
def erase_line(self) -> None:
"""Erase all content on the line the cursor is currently located at"""
screen_size = self.screen_size
cursor_position = self.cursor_position
cells_to_erase = screen_size.col
start_coordinates = WindowsCoordinates(row=cursor_position.row, col=0)
FillConsoleOutputCharacter(
self._handle, " ", length=cells_to_erase, start=start_coordinates
)
FillConsoleOutputAttribute(
self._handle,
self._default_attrs,
length=cells_to_erase,
start=start_coordinates,
)
def erase_end_of_line(self) -> None:
"""Erase all content from the cursor position to the end of that line"""
cursor_position = self.cursor_position
cells_to_erase = self.screen_size.col - cursor_position.col
FillConsoleOutputCharacter(
self._handle, " ", length=cells_to_erase, start=cursor_position
)
FillConsoleOutputAttribute(
self._handle,
self._default_attrs,
length=cells_to_erase,
start=cursor_position,
)
def erase_start_of_line(self) -> None:
"""Erase all content from the cursor position to the start of that line"""
row, col = self.cursor_position
start = WindowsCoordinates(row, 0)
FillConsoleOutputCharacter(self._handle, " ", length=col, start=start)
FillConsoleOutputAttribute(
self._handle, self._default_attrs, length=col, start=start
)
def move_cursor_up(self) -> None:
"""Move the cursor up a single cell"""
cursor_position = self.cursor_position
SetConsoleCursorPosition(
self._handle,
coords=WindowsCoordinates(
row=cursor_position.row - 1, col=cursor_position.col
),
)
def move_cursor_down(self) -> None:
"""Move the cursor down a single cell"""
cursor_position = self.cursor_position
SetConsoleCursorPosition(
self._handle,
coords=WindowsCoordinates(
row=cursor_position.row + 1,
col=cursor_position.col,
),
)
def move_cursor_forward(self) -> None:
"""Move the cursor forward a single cell. Wrap to the next line if required."""
row, col = self.cursor_position
if col == self.screen_size.col - 1:
row += 1
col = 0
else:
col += 1
SetConsoleCursorPosition(
self._handle, coords=WindowsCoordinates(row=row, col=col)
)
def move_cursor_to_column(self, column: int) -> None:
"""Move cursor to the column specified by the zero-based column index, staying on the same row
Args:
column (int): The zero-based column index to move the cursor to.
"""
row, _ = self.cursor_position
SetConsoleCursorPosition(self._handle, coords=WindowsCoordinates(row, column))
def move_cursor_backward(self) -> None:
"""Move the cursor backward a single cell. Wrap to the previous line if required."""
row, col = self.cursor_position
if col == 0:
row -= 1
col = self.screen_size.col - 1
else:
col -= 1
SetConsoleCursorPosition(
self._handle, coords=WindowsCoordinates(row=row, col=col)
)
def hide_cursor(self) -> None:
"""Hide the cursor"""
current_cursor_size = self._get_cursor_size()
invisible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=0)
SetConsoleCursorInfo(self._handle, cursor_info=invisible_cursor)
def show_cursor(self) -> None:
"""Show the cursor"""
current_cursor_size = self._get_cursor_size()
visible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=1)
SetConsoleCursorInfo(self._handle, cursor_info=visible_cursor)
def set_title(self, title: str) -> None:
"""Set the title of the terminal window
Args:
title (str): The new title of the console window
"""
assert len(title) < 255, "Console title must be less than 255 characters"
SetConsoleTitle(title)
def _get_cursor_size(self) -> int:
"""Get the percentage of the character cell that is filled by the cursor"""
cursor_info = CONSOLE_CURSOR_INFO()
GetConsoleCursorInfo(self._handle, cursor_info=cursor_info)
return int(cursor_info.dwSize)
if __name__ == "__main__":
handle = GetStdHandle()
from pip._vendor.rich.console import Console
console = Console()
term = LegacyWindowsTerm(sys.stdout)
term.set_title("Win32 Console Examples")
style = Style(color="black", bgcolor="red")
heading = Style.parse("black on green")
# Check colour output
console.rule("Checking colour output")
console.print("[on red]on red!")
console.print("[blue]blue!")
console.print("[yellow]yellow!")
console.print("[bold yellow]bold yellow!")
console.print("[bright_yellow]bright_yellow!")
console.print("[dim bright_yellow]dim bright_yellow!")
console.print("[italic cyan]italic cyan!")
console.print("[bold white on blue]bold white on blue!")
console.print("[reverse bold white on blue]reverse bold white on blue!")
console.print("[bold black on cyan]bold black on cyan!")
console.print("[black on green]black on green!")
console.print("[blue on green]blue on green!")
console.print("[white on black]white on black!")
console.print("[black on white]black on white!")
console.print("[#1BB152 on #DA812D]#1BB152 on #DA812D!")
# Check cursor movement
console.rule("Checking cursor movement")
console.print()
term.move_cursor_backward()
term.move_cursor_backward()
term.write_text("went back and wrapped to prev line")
time.sleep(1)
term.move_cursor_up()
term.write_text("we go up")
time.sleep(1)
term.move_cursor_down()
term.write_text("and down")
time.sleep(1)
term.move_cursor_up()
term.move_cursor_backward()
term.move_cursor_backward()
term.write_text("we went up and back 2")
time.sleep(1)
term.move_cursor_down()
term.move_cursor_backward()
term.move_cursor_backward()
term.write_text("we went down and back 2")
time.sleep(1)
# Check erasing of lines
term.hide_cursor()
console.print()
console.rule("Checking line erasing")
console.print("\n...Deleting to the start of the line...")
term.write_text("The red arrow shows the cursor location, and direction of erase")
time.sleep(1)
term.move_cursor_to_column(16)
term.write_styled("<", Style.parse("black on red"))
term.move_cursor_backward()
time.sleep(1)
term.erase_start_of_line()
time.sleep(1)
console.print("\n\n...And to the end of the line...")
term.write_text("The red arrow shows the cursor location, and direction of erase")
time.sleep(1)
term.move_cursor_to_column(16)
term.write_styled(">", Style.parse("black on red"))
time.sleep(1)
term.erase_end_of_line()
time.sleep(1)
console.print("\n\n...Now the whole line will be erased...")
term.write_styled("I'm going to disappear!", style=Style.parse("black on cyan"))
time.sleep(1)
term.erase_line()
term.show_cursor()
print("\n")
| LegacyWindowsTerm |
python | sphinx-doc__sphinx | sphinx/domains/std/__init__.py | {
"start": 2994,
"end": 3817
} | class ____(XRefRole):
"""Cross-referencing role for environment variables (adds an index entry)."""
def result_nodes(
self,
document: nodes.document,
env: BuildEnvironment,
node: Element,
is_ref: bool,
) -> tuple[list[Node], list[system_message]]:
if not is_ref:
return [node], []
varname = node['reftarget']
tgtid = 'index-%s' % env.new_serialno('index')
indexnode = addnodes.index()
indexnode['entries'] = [
('single', varname, tgtid, '', None),
('single', _('environment variable; %s') % varname, tgtid, '', None),
]
targetnode = nodes.target('', '', ids=[tgtid])
document.note_explicit_target(targetnode)
return [indexnode, targetnode, node], []
| EnvVarXRefRole |
python | python-attrs__attrs | tests/test_packaging.py | {
"start": 203,
"end": 1033
} | class ____:
def test_version(self, mod, recwarn):
"""
__version__ returns the correct version and doesn't warn.
"""
assert metadata.version("attrs") == mod.__version__
assert [] == recwarn.list
def test_does_not_exist(self, mod):
"""
Asking for unsupported dunders raises an AttributeError.
"""
with pytest.raises(
AttributeError,
match=f"module {mod.__name__} has no attribute __yolo__",
):
mod.__yolo__
def test_version_info(self, recwarn, mod):
"""
        __version_info__ is not deprecated, therefore doesn't raise a warning
and parses correctly.
"""
assert isinstance(mod.__version_info__, attr.VersionInfo)
assert [] == recwarn.list
| TestLegacyMetadataHack |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 18680,
"end": 19661
} | class ____(PrefectFilterBaseModel):
"""Filter by `FlowRun.next_scheduled_start_time`."""
before_: Optional[DateTime] = Field(
default=None,
description=(
"Only include flow runs with a next_scheduled_start_time or before this"
" time"
),
)
after_: Optional[DateTime] = Field(
default=None,
description=(
"Only include flow runs with a next_scheduled_start_time at or after this"
" time"
),
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.before_ is not None:
filters.append(db.FlowRun.next_scheduled_start_time <= self.before_)
if self.after_ is not None:
filters.append(db.FlowRun.next_scheduled_start_time >= self.after_)
return filters
| FlowRunFilterNextScheduledStartTime |
python | huggingface__transformers | src/transformers/models/nanochat/modular_nanochat.py | {
"start": 4323,
"end": 4592
} | class ____(CLIPMLP):
def __init__(self, config):
super().__init__(config)
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
| NanoChatMLP |
python | pallets__werkzeug | src/werkzeug/routing/exceptions.py | {
"start": 598,
"end": 1225
} | class ____(HTTPException, RoutingException):
"""Raise if the map requests a redirect. This is for example the case if
`strict_slashes` are activated and an url that requires a trailing slash.
The attribute `new_url` contains the absolute destination url.
"""
code = 308
def __init__(self, new_url: str) -> None:
super().__init__(new_url)
self.new_url = new_url
def get_response(
self,
environ: WSGIEnvironment | Request | None = None,
scope: dict[str, t.Any] | None = None,
) -> Response:
return redirect(self.new_url, self.code)
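# A usage sketch: matching '/path' against a map holding Rule('/path/', ...)
# raises RequestRedirect, and a handler can return
# `exc.get_response(environ)` to emit the 308 response.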
| RequestRedirect |
python | davidhalter__jedi | jedi/inference/filters.py | {
"start": 11155,
"end": 11705
} | class ____(type):
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
base_dct = {}
for base_cls in reversed(cls.__bases__):
try:
base_dct.update(base_cls.overwritten_methods)
except AttributeError:
pass
for func in cls.__dict__.values():
try:
base_dct.update(func.registered_overwritten_methods)
except AttributeError:
pass
cls.overwritten_methods = base_dct
| _OverwriteMeta |
python | huggingface__transformers | src/transformers/models/dots1/modeling_dots1.py | {
"start": 18749,
"end": 20704
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: Dots1Config, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = Dots1Attention(config=config, layer_idx=layer_idx)
if layer_idx >= config.first_k_dense_replace:
self.mlp = Dots1MoE(config)
else:
self.mlp = Dots1MLP(config)
self.input_layernorm = Dots1RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = Dots1RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.attention_type = config.layer_types[layer_idx]
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
| Dots1DecoderLayer |
python | justquick__django-activity-stream | actstream/gfk.py | {
"start": 478,
"end": 1436
} | class ____(QuerySet):
"""
A QuerySet with a fetch_generic_relations() method to bulk fetch
all generic related items. Similar to select_related(), but for
generic foreign keys. This wraps QuerySet.prefetch_related.
"""
def fetch_generic_relations(self, *args):
qs = self._clone()
if not settings.FETCH_RELATIONS:
return qs
private_fields = self.model._meta.private_fields
gfk_fields = [g for g in private_fields if isinstance(g, GenericForeignKey)]
if args:
gfk_fields = [g for g in gfk_fields if g.name in args]
return qs.prefetch_related(*[g.name for g in gfk_fields])
def _clone(self, klass=None, **kwargs):
return super(GFKQuerySet, self)._clone()
def none(self):
clone = self._clone({'klass': EmptyGFKQuerySet})
if hasattr(clone.query, 'set_empty'):
clone.query.set_empty()
return clone
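# A usage sketch (manager wiring not shown): with a manager whose
# get_queryset() returns a GFKQuerySet, generic foreign keys are bulk-fetched
# via e.g. `Action.objects.all().fetch_generic_relations('actor', 'target')`.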
| GFKQuerySet |
python | numba__numba | numba/core/errors.py | {
"start": 19101,
"end": 19243
} | class ____(NumbaError):
"""
An error occurred because parfors is not supported on the platform.
"""
pass
| UnsupportedParforsError |
python | huggingface__transformers | tests/models/modernbert/test_modeling_modernbert.py | {
"start": 9443,
"end": 22372
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
ModernBertModel,
ModernBertForMaskedLM,
ModernBertForSequenceClassification,
ModernBertForTokenClassification,
ModernBertForQuestionAnswering,
ModernBertForMultipleChoice,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": ModernBertModel,
"fill-mask": ModernBertForMaskedLM,
"text-classification": ModernBertForSequenceClassification,
"token-classification": ModernBertForTokenClassification,
"zero-shot": ModernBertForSequenceClassification,
"question-answering": ModernBertForQuestionAnswering,
}
if is_torch_available()
else {}
)
model_split_percents = [0.5, 0.8, 0.9]
# special case for ForPreTraining model
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if inputs_dict.get("output_attentions", False):
inputs_dict["output_attentions"] = True
if return_labels:
if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
inputs_dict["next_sentence_label"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = ModernBertModelTester(self)
self.config_tester = ConfigTester(self, config_class=ModernBertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_for_warning_if_padding_and_no_attention_mask(self):
(
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.model_tester.prepare_config_and_inputs()
# Set pad tokens in the input_ids
input_ids[0, 0] = config.pad_token_id
# Check for warnings if the attention_mask is missing.
logger = logging.get_logger("transformers.modeling_utils")
# clear cache so we can test the warning is emitted (from `warning_once`).
logger.warning_once.cache_clear()
with CaptureLogger(logger) as cl:
model = ModernBertModel(config=config)
model.to(torch_device)
model.eval()
model(input_ids, attention_mask=None)
self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out)
@unittest.skip("ModernBert doesn't use separate classes for SDPA, but a function instead.")
def test_sdpa_can_dispatch_non_composite_models(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "google-bert/bert-base-uncased"
model = ModernBertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_flash_attn
@require_torch_gpu
@pytest.mark.flash_attn_test
@slow
def test_flash_attn_2_inference_equivalence_right_padding(self):
self.skipTest(reason="ModernBert flash attention does not support right padding")
@require_flash_attn
@require_torch_gpu
@pytest.mark.flash_attn_test
@slow
def test_flash_attn_2_conversion(self):
self.skipTest(reason="ModernBert doesn't use the ModernBertFlashAttention2 class method.")
@pytest.mark.torch_compile_test
def test_saved_config_excludes_reference_compile(self):
config = ModernBertConfig(reference_compile=True)
with tempfile.TemporaryDirectory() as tmpdirname:
config.save_pretrained(tmpdirname)
with open(os.path.join(tmpdirname, "config.json")) as f:
config_dict = json.load(f)
self.assertNotIn("reference_compile", config_dict)
@require_flash_attn
@require_torch_accelerator
@pytest.mark.flash_attn_test
def test_flash_attention_dispatches_by_default(self):
"ModernBert should dispatch to FA2 by default, not SDPA"
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config=config)
self.assertTrue(model.config._attn_implementation == "flash_attention_2")
# This is overloaded because the model handles padding / unpadding on its own, thus ModernBertForMultipleChoice has
# a different hidden states shape when using FA2.
def flash_attn_inference_equivalence(
self, attn_implementation: str, padding_side: str, atol: float = 4e-2, rtol: float = 4e-2
):
r"""
Tests the equivalence between the eager and flash attention implementations.
This test is only for inference and runs with `dtype=torch.bfloat16`.
"""
if not self.has_attentions:
self.skipTest(reason="Model architecture does not support attentions")
# This flag is used to know if the test was skipped for all `self.all_model_classes` or not
_has_run_at_least_one_model = False
for model_class in self.all_model_classes:
# Custom kernel which needs the mask interface to be properly usable on these models
if not model_class._supports_attention_backend and not attn_implementation.startswith("flash_attention"):
continue
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# flash attention variants do not always support an arbitrary head dim
config = self._prepare_config_headdim(config, 16)
# forcing the prefill size to go over sliding window size to check for SWA correctness
if getattr(config, "sliding_window", None):
config.sliding_window = 2
model = model_class(config)
if not all(
submodel._supports_flash_attn for submodel in model.modules() if isinstance(submodel, PreTrainedModel)
):
continue
# If we end up here, at least one model class was not skipped
_has_run_at_least_one_model = True
with tempfile.TemporaryDirectory() as tmpdirname:
# Save the model so we can reload with correct attention
model.save_pretrained(tmpdirname)
# Create first inputs without attention mask
main_input = inputs_dict[model.main_input_name]
# Only keep first batch sequence
if isinstance(main_input, torch.Tensor):
main_input = main_input[:1]
# Fix the dtype
if torch.is_floating_point(main_input):
main_input = main_input.to(torch.bfloat16)
first_inputs = {model.main_input_name: main_input, "output_hidden_states": True}
# Some models have a main input name that differs from input_ids but still require input_ids, e.g. BarkFine
if model.main_input_name != "input_ids" and "input_ids" in inputs_dict:
first_inputs["input_ids"] = inputs_dict["input_ids"][:1]
# If we have some pixel values, use them as well
if model.main_input_name != "pixel_values" and "pixel_values" in inputs_dict:
# NOTE: this fixes qwen2_5_vl/omni because tests break w/ pixel values
if "image_grid_thw" in inputs_dict:
continue
first_inputs["pixel_values"] = inputs_dict["pixel_values"][:1].to(torch.bfloat16)
if model.config.is_encoder_decoder:
decoder_input_ids = inputs_dict.get("decoder_input_ids", first_inputs.get("input_ids"))
if decoder_input_ids is not None:
first_inputs["decoder_input_ids"] = decoder_input_ids[:1]
# Create attention mask with padding
dummy_attention_mask = inputs_dict.get("attention_mask", None)
if dummy_attention_mask is not None:
dummy_attention_mask = dummy_attention_mask[:1]
if padding_side == "left":
dummy_attention_mask[:, 1:] = 1
dummy_attention_mask[:, 0] = 0
else:
dummy_attention_mask[:, :-1] = 1
dummy_attention_mask[:, -1] = 0
# Create second inputs with attention mask and padding
second_inputs = copy.deepcopy(first_inputs)
if dummy_attention_mask is not None:
second_inputs["attention_mask"] = dummy_attention_mask
if model.config.is_encoder_decoder:
second_inputs["decoder_attention_mask"] = dummy_attention_mask
# Use prepare for class to account for special attributes (e.g. in QnA models)
first_inputs = self._prepare_for_class(first_inputs, model_class)
first_inputs = {
k: v.to(torch_device) if isinstance(v, torch.Tensor) else v for k, v in first_inputs.items()
}
second_inputs = self._prepare_for_class(second_inputs, model_class)
second_inputs = {
k: v.to(torch_device) if isinstance(v, torch.Tensor) else v for k, v in second_inputs.items()
}
model = model_class.from_pretrained(
tmpdirname, dtype=torch.bfloat16, attn_implementation="eager", device_map=torch_device
)
# First run without attention mask
outputs = model(**first_inputs)
retrieve_logits = model_class == ModernBertForMultipleChoice
logits_1_eager = outputs.logits if retrieve_logits else outputs.hidden_states[-1]
# Second run with attention mask and padding
outputs = model(**second_inputs)
logits_2_eager = outputs.logits if retrieve_logits else outputs.hidden_states[-1]
# Switch to FA
del model
model = model_class.from_pretrained(
tmpdirname, dtype=torch.bfloat16, attn_implementation=attn_implementation, device_map=torch_device
)
outputs = model(**first_inputs)
logits_1_fa = outputs.logits if retrieve_logits else outputs.hidden_states[-1]
# Second run with attention mask and padding
outputs = model(**second_inputs)
logits_2_fa = outputs.logits if retrieve_logits else outputs.hidden_states[-1]
# Check the results
torch.testing.assert_close(logits_1_eager, logits_1_fa, atol=atol, rtol=rtol)
if padding_side == "left":
torch.testing.assert_close(logits_2_eager[1:], logits_2_fa[1:], atol=atol, rtol=rtol)
# Check it can run in training mode
model.train()
_ = model(**second_inputs)
else:
torch.testing.assert_close(logits_2_eager[:-1], logits_2_fa[:-1], atol=atol, rtol=rtol)
# In this case, the test should appear as skipped, not successful
if not _has_run_at_least_one_model:
self.skipTest(
f"Model architecture does not support {attn_implementation}, or setting its attention dynamically"
)
@require_torch
| ModernBertModelTest |
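A short inference sketch for the model family tested above; the checkpoint name is an assumption for illustration:

import torch
from transformers import AutoTokenizer, ModernBertForMaskedLM

checkpoint = "answerdotai/ModernBERT-base"  # assumed checkpoint name
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = ModernBertForMaskedLM.from_pretrained(checkpoint)

inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
# Decode the most likely token at the [MASK] position.
mask_pos = (inputs.input_ids == tokenizer.mask_token_id).nonzero()[0, 1]
print(tokenizer.decode(logits[0, mask_pos].argmax(-1)))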
python | scrapy__scrapy | tests/spiders.py | {
"start": 6237,
"end": 6538
} | class ____(SimpleSpider):
name = "asyncdef_deferred_wrapped"
async def parse(self, response):
resp = await maybe_deferred_to_future(
get_web_client_agent_req(self.mockserver.url("/status?n=200"))
)
yield {"code": resp.code}
| AsyncDefDeferredMaybeWrappedSpider |
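The general pattern the spider above relies on, sketched standalone; `treq.get` stands in for any Deferred-returning call and is an assumption (awaiting Deferreds this way requires Scrapy's asyncio reactor):

import treq
from scrapy.utils.defer import maybe_deferred_to_future

async def parse(self, response):
    # Wrap the Twisted Deferred so it can be awaited from an async callback.
    resp = await maybe_deferred_to_future(treq.get("https://example.com/"))
    yield {"code": resp.code}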
python | facebook__pyre-check | client/dataclasses_json_extensions.py | {
"start": 480,
"end": 706
} | class ____(dataclasses_json.DataClassJsonMixin):
@classmethod
@functools.lru_cache(maxsize=64)
def cached_schema(cls) -> dataclasses_json.api.SchemaType:
return cls.schema()
| DataclassJsonMixinWithCachedSchema |
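A sketch of how such a mixin is typically used; `Profile` is an illustrative dataclass:

import dataclasses

@dataclasses.dataclass(frozen=True)
class Profile(DataclassJsonMixinWithCachedSchema):
    name: str
    age: int

# The marshmallow schema is built once per class and memoized, so hot
# (de)serialization paths skip repeated schema construction.
schema = Profile.cached_schema()
profile = schema.loads('{"name": "ada", "age": 36}')
assert Profile.cached_schema() is schema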
python | tensorflow__tensorflow | tensorflow/python/distribute/failure_handling/preemption_watcher.py | {
"start": 1900,
"end": 5788
} | class ____:
"""Watch preemption signal and store it.
Notice: Currently only support Borg TPU environment with TPUClusterResolver.
This class provides a way to monitor the preemption signal during training on
TPU. It will start a background thread to watch the training process, trying
to fetch the preemption message from the coordination service. When a
preemption happens, the preempted worker writes the preemption message to the
coordination service, so receiving a non-empty preemption message means a
preemption has happened.
Users can use the preemption message as a reliable preemption indicator and
then have the coordinator reconnect to the TPU worker instead of triggering a
full restart via Borg. For example, a training process with preemption
recovery looks like:
```python
keep_running = True
preemption_watcher = None
while keep_running:
try:
# Initialize TPU cluster and strategy.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
# PreemptionWatcher must be created after connected to cluster.
preemption_watcher = tf.distribute.experimental.PreemptionWatcher()
train_model(strategy)
keep_running = False
except Exception as e:
if preemption_watcher and preemption_watcher.preemption_message:
preemption_watcher.block_until_worker_exit()
keep_running = True
else:
raise e
```
Attributes:
preemption_message: A variable to store the preemption message fetched from
the coordination service. If it is not None, a preemption has happened.
platform: A PlatformDevice to indicate the current job's platform. Refer to
failure_handling_util.py for the definition of enum class PlatformDevice.
"""
def __init__(self):
# TODO(b/254321514): Integrate with GPU and cloud environments.
self._preemption_message = None
self._platform = detect_platform()
if self._platform != PlatformDevice.INTERNAL_TPU:
logging.warning(
"Preemption watcher does not support environment: %s", self._platform
)
else:
_preemption_watcher_initialization_counter.get_cell().increase_by(1)
threading.Thread(target=self._watch_preemption_key, daemon=True).start()
@property
def preemption_message(self):
"""Returns the preemption message."""
return self._preemption_message
def _watch_preemption_key(self):
logging.info("Watching preemption signal.")
message = context.context().get_config_key_value(_PREEMPTION_KEY)
_preemption_handling_counter.get_cell().increase_by(1)
logging.info("Preemption signal received.")
self._preemption_message = message
def block_until_worker_exit(self):
"""Block coordinator until workers exit.
In some rare cases, another error could be raised during the
preemption grace period. This will cause the coordinator to reconnect to the
same TPU workers, which will be killed later. This prevents the coordinator
from reconnecting to new TPU workers and falls back to a hard restart. To
avoid this situation, this method blocks the coordinator from reconnecting
until the workers exit. It is a no-op on non-TPU platforms.
"""
if self._platform != PlatformDevice.INTERNAL_TPU:
return
try:
context.context().get_config_key_value("BLOCK_TILL_EXIT")
except InternalError as e:
# Ensure that internal error is related to coordination service.
if "Coordination service is not enabled." not in e.message:
raise
logging.info("Workers exited.")
except (AbortedError, CancelledError, UnavailableError):
logging.info("Workers exited.")
| PreemptionWatcher |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/streaming/_messages.py | {
"start": 828,
"end": 4335
} | class ____:
text_stream: Iterator[str]
"""Iterator over just the text deltas in the stream.
```py
for text in stream.text_stream:
print(text, end="", flush=True)
print()
```
"""
def __init__(self, raw_stream: Stream[RawMessageStreamEvent]) -> None:
self._raw_stream = raw_stream
self.text_stream = self.__stream_text__()
self._iterator = self.__stream__()
self.__final_message_snapshot: Message | None = None
@property
def response(self) -> httpx.Response:
return self._raw_stream.response
@property
def request_id(self) -> str | None:
return self.response.headers.get("request-id") # type: ignore[no-any-return]
def __next__(self) -> MessageStreamEvent:
return self._iterator.__next__()
def __iter__(self) -> Iterator[MessageStreamEvent]:
for item in self._iterator:
yield item
def __enter__(self) -> Self:
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self.close()
def close(self) -> None:
"""
Close the response and release the connection.
Automatically called if the response body is read to completion.
"""
self._raw_stream.close()
def get_final_message(self) -> Message:
"""Waits until the stream has been read to completion and returns
the accumulated `Message` object.
"""
self.until_done()
assert self.__final_message_snapshot is not None
return self.__final_message_snapshot
def get_final_text(self) -> str:
"""Returns all `text` content blocks concatenated together.
> [!NOTE]
> Currently the API will only respond with a single content block.
Will raise an error if no `text` content blocks were returned.
"""
message = self.get_final_message()
text_blocks: list[str] = []
for block in message.content:
if block.type == "text":
text_blocks.append(block.text)
if not text_blocks:
raise RuntimeError(
f".get_final_text() can only be called when the API returns a `text` content block.\nThe API returned {','.join([b.type for b in message.content])} content block type(s) that you can access by calling get_final_message().content"
)
return "".join(text_blocks)
def until_done(self) -> None:
"""Blocks until the stream has been consumed"""
consume_sync_iterator(self)
# properties
@property
def current_message_snapshot(self) -> Message:
assert self.__final_message_snapshot is not None
return self.__final_message_snapshot
def __stream__(self) -> Iterator[MessageStreamEvent]:
for sse_event in self._raw_stream:
self.__final_message_snapshot = accumulate_event(
event=sse_event,
current_snapshot=self.__final_message_snapshot,
)
events_to_fire = build_events(event=sse_event, message_snapshot=self.current_message_snapshot)
for event in events_to_fire:
yield event
def __stream_text__(self) -> Iterator[str]:
for chunk in self:
if chunk.type == "content_block_delta" and chunk.delta.type == "text_delta":
yield chunk.delta.text
| MessageStream |
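A typical consumption pattern for the wrapper above, using the SDK's streaming helper; the model name is illustrative:

import anthropic

client = anthropic.Anthropic()
with client.messages.stream(
    model="claude-sonnet-4-5",  # illustrative model name
    max_tokens=1024,
    messages=[{"role": "user", "content": "Say hello"}],
) as stream:
    for text in stream.text_stream:
        print(text, end="", flush=True)
    # Once the stream is exhausted, the accumulated Message is available.
    message = stream.get_final_message()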
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_misc.py | {
"start": 2017,
"end": 24163
} | class ____(FSDPTest):
@property
def world_size(self):
return 2
@property
def process_group(self):
return dist.distributed_c10d._get_default_group()
@skip_if_lt_x_gpu(2)
@parametrize("use_index", [True, False])
def test_fsdp_device_id(self, use_index):
"""
Tests the FSDP ``device_id`` argument:
- Wrapping a CPU module should move the module to the GPU matching
``device_id``
- Wrapping a GPU module already on the GPU matching ``device_id``
should not raise an error
- Wrapping a GPU module already on GPU and passing a GPU device
without specifying a device ID (i.e. ``torch.device("cuda")``) warns
"""
dev_id = (
torch.accelerator.current_device_index()
if use_index
else torch.device(device_type, torch.accelerator.current_device_index())
)
def _check_device_matches(module, device_id):
"""Checks that the ``FlatParameter``s in ``module`` have device
matching ``device_id``."""
devices = {
p.device for p in module.parameters() if isinstance(p, FlatParameter)
}
assert len(devices) > 0
self.assertEqual(1, len(devices))
found_device = devices.pop()
if use_index and not isinstance(device_id, torch.device):
device = torch.device(device_type, device_id)
else:
device = device_id
self.assertEqual(found_device, device)
# Check that FSDP parameters are moved to `device_id` for a CPU module
nested_wrapped_module = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.RECURSIVE,
DEVICEInitMode.DEVICE_NEVER,
fsdp_kwargs={"device_id": dev_id},
)
_check_device_matches(nested_wrapped_module, dev_id)
# Check that specifying `device_id` for a GPU module already on that
# device does not raise an error
nested_wrapped_module = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.RECURSIVE,
DEVICEInitMode.DEVICE_BEFORE,
fsdp_kwargs={"device_id": dev_id},
)
_check_device_matches(nested_wrapped_module, dev_id)
# Check that passing in `torch.device("cuda")` for a GPU module warns
regex = "does not have an explicit index"
context = self.assertWarnsRegex(
expected_warning=UserWarning, expected_regex=regex
)
with context:
nested_wrapped_module = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.RECURSIVE,
DEVICEInitMode.DEVICE_BEFORE,
fsdp_kwargs={"device_id": torch.device(device_type)},
)
_check_device_matches(
nested_wrapped_module,
torch.device(device_type, torch.accelerator.current_device_index()),
)
@skip_if_lt_x_gpu(2)
def test_fsdp_zero2_eval_with_prefetch(self):
# Test FSDP validation with SHARD_GRAD_OP and forward_prefetch
class Mnist(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
self.ln = nn.LayerNorm(9216)
def forward(self, x, y):
x = self.conv1(x)
x = torch.nn.functional.relu(x)
x = self.conv2(x)
x = torch.nn.functional.relu(x)
x = torch.nn.functional.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.ln(x)
x = self.fc1(x)
x = torch.nn.functional.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = torch.nn.functional.log_softmax(x, dim=1)
loss = torch.nn.functional.cross_entropy(output, y)
return loss
model = Mnist().to(device=device_type)
model1 = Mnist().to(device=device_type)
model1.load_state_dict(model.state_dict())
fsdp_model = FSDP(
model,
sharding_strategy=ShardingStrategy.SHARD_GRAD_OP,
forward_prefetch=True,
use_orig_params=True,
auto_wrap_policy=ModuleWrapPolicy([nn.Linear, nn.Conv2d]),
)
ddp_model = torch.nn.parallel.DistributedDataParallel(
model1,
)
fsdp_opt = torch.optim.SGD(fsdp_model.parameters(), lr=1e-4)
ddp_opt = torch.optim.SGD(ddp_model.parameters(), lr=1e-4)
seed = self.rank + 20231010
torch.manual_seed(seed)
torch.get_device_module(device_type).manual_seed(seed)
losses = []
grads = []
for i in range(5):
x = torch.randn(8, 1, 28, 28, device=device_type).requires_grad_()
y = torch.randint(low=0, high=9, size=(8,), device=device_type)
for model, opt in ((fsdp_model, fsdp_opt), (ddp_model, ddp_opt)):
seed = self.rank + i
torch.manual_seed(seed)
torch.get_device_module(device_type).manual_seed(seed)
loss = model(x, y).sum()
losses.append(loss)
loss.backward()
opt.step()
grads.append(x.grad)
opt.zero_grad()
assert torch.allclose(losses[0], losses[1])
assert torch.allclose(grads[0], grads[1])
losses.clear()
grads.clear()
with torch.no_grad():
fsdp_model.eval()
ddp_model.eval()
for _ in range(5):
x = torch.randn(8, 1, 28, 28, device=device_type).requires_grad_()
y = torch.randint(low=0, high=9, size=(8,), device=device_type)
fsdp_loss = fsdp_model(x, y)
ddp_loss = ddp_model(x, y)
assert torch.allclose(fsdp_loss, ddp_loss)
fsdp_model.train()
ddp_model.train()
for i in range(5):
x = torch.randn(8, 1, 28, 28, device=device_type).requires_grad_()
y = torch.randint(low=0, high=9, size=(8,), device=device_type)
for model, opt in ((fsdp_model, fsdp_opt), (ddp_model, ddp_opt)):
seed = self.rank + i
torch.manual_seed(seed)
torch.get_device_module(device_type).manual_seed(seed)
loss = model(x, y).sum()
losses.append(loss)
loss.backward()
opt.step()
grads.append(x.grad)
opt.zero_grad()
assert torch.allclose(losses[0], losses[1])
assert torch.allclose(grads[0], grads[1])
losses.clear()
grads.clear()
@skip_if_lt_x_gpu(2)
@parametrize("use_second_layer", [True, False])
@parametrize("sharding_strategy", [ShardingStrategy.NO_SHARD, None])
def test_fsdp_module_no_compute_grad(self, use_second_layer, sharding_strategy):
# When use_second_layer=True, b is involved in forward computation but does
# not receive grad in backward. Otherwise, b is not involved in forward
# computation.
class MyModel(nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = nn.Linear(10, 10)
self.b = nn.Linear(10, 10)
def forward(self, x, y):
out1 = self.a(x)
if use_second_layer:
out2 = self.b(y)
return out1, out2
else:
return out1
fsdp = FSDP(
MyModel().to(device=device_type),
sharding_strategy=sharding_strategy,
auto_wrap_policy=always_wrap_policy,
)
x = torch.randn(10, 10, device=device_type)
y = torch.randn(10, 10, device=device_type)
for _ in range(4):
if use_second_layer:
a, _ = fsdp(x, y)
else:
a = fsdp(x, y)
loss = a.sum()
loss.backward()
# self.a receives grad, self.b does not
a_grad = fsdp.module.a._handle.flat_param.grad
b_grad = fsdp.module.b._handle.flat_param.grad
self.assertIsNotNone(a_grad)
self.assertIsNone(b_grad)
@skip_if_lt_x_gpu(2)
def test_fsdp_not_all_outputs_used_in_loss(self):
self.run_subtests(
{
"sharding_strategy": [
ShardingStrategy.FULL_SHARD,
ShardingStrategy.SHARD_GRAD_OP,
ShardingStrategy.NO_SHARD,
]
},
self._test_fsdp_not_all_outputs_used_in_loss,
)
def _test_fsdp_not_all_outputs_used_in_loss(
self, sharding_strategy: ShardingStrategy
):
class MyModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.lin1 = nn.Linear(4, 4)
self.lin2 = nn.Linear(4, 4)
def forward(self, x):
a = self.lin1(x)
b = self.lin2(x)
return (a, b)
def _check_resharded(fsdp_module):
handle = fsdp_module._handle
if not handle:
return
param = handle.flat_param
if handle.uses_sharded_strategy:
full_param = param._full_param_padded
self.assertEqual(full_param.storage().size(), 0)
self.assertEqual(param.data_ptr(), param._local_shard.data_ptr())
def _check_equal(local, fsdp):
with FSDP.summon_full_params(fsdp):
for p1, p2 in zip(fsdp.parameters(), local.parameters()):
torch.testing.assert_close(p1, p2)
fsdp_ctor = functools.partial(FSDP, sharding_strategy=sharding_strategy)
m = MyModule().to(device=device_type)
m_local = deepcopy(m)
local_m = m_local
prev_params = [p.clone() for p in m_local.parameters()]
m.lin1 = fsdp_ctor(m.lin1)
m = fsdp_ctor(m)
_check_equal(m_local, m)
opt = torch.optim.SGD(m.parameters(), lr=1e-3)
opt_local = torch.optim.SGD(local_m.parameters(), lr=1e-3)
for i in range(6):
t = torch.ones(4, device=device_type)
a, b = m(t)
local_a, local_b = local_m(t)
if i < 2:
# use both params in loss computation. Later,
# b will go unused and we check grads are the
# same as local training.
loss = (a @ b).sum()
loss_local = (local_a @ local_b).sum()
else:
loss = a.sum()
loss_local = local_a.sum()
loss.backward()
loss_local.backward()
_check_resharded(m)
opt.step()
opt_local.step()
_check_equal(m_local, m)
# Ensure at least some change from previous params, otherwise
# above check would be vacuously true.
self.assertTrue(
any(
not torch.equal(p1, p2)
for p1, p2 in zip(prev_params, m_local.parameters())
)
)
prev_params = [p.clone() for p in local_m.parameters()]
opt.zero_grad()
opt_local.zero_grad()
dist.barrier()
@skip_if_lt_x_gpu(2)
def test_fsdp_optim_overlap_no_use_orig_params_error(self):
fsdp_overlap = FSDP(
MyModel().to(device=device_type),
auto_wrap_policy=always_wrap_policy,
use_orig_params=False,
)
optim_cls = torch.optim.SGD
optim_kwargs = {"lr": 0.03}
_apply_optimizer_in_backward(
optimizer_class=optim_cls,
params=fsdp_overlap.parameters(),
optimizer_kwargs=optim_kwargs,
register_hook=False,
)
inp = torch.randn(10, 10, device=device_type)
with self.assertRaisesRegex(
RuntimeError, "only supported with use_orig_params=True"
):
fsdp_overlap(inp, inp)
@skip_if_lt_x_gpu(2)
def test_fsdp_optimizer_overlap(self):
torch.manual_seed(0)
for cpu_offload in [True, False]:
offload = CPUOffload(offload_params=cpu_offload)
model = MyModel().to(device=device_type)
model_overlap = deepcopy(model)
fsdp = FSDP(
model.to(device=device_type),
auto_wrap_policy=always_wrap_policy,
use_orig_params=True,
cpu_offload=offload,
)
fsdp_overlap = FSDP(
model_overlap.to(device=device_type),
auto_wrap_policy=always_wrap_policy,
use_orig_params=True,
cpu_offload=offload,
)
optim_cls = torch.optim.SGD
optim_kwargs = {"lr": 0.03}
_apply_optimizer_in_backward(
optimizer_class=optim_cls,
params=fsdp_overlap.parameters(),
optimizer_kwargs=optim_kwargs,
register_hook=False,
)
for p in fsdp_overlap.parameters():
assert hasattr(p, "_in_backward_optimizers")
optim = optim_cls(fsdp.parameters(), **optim_kwargs)
# Verify params initially equal
for p1, p2 in zip(fsdp.parameters(), fsdp_overlap.parameters()):
self.assertEqual(p1, p2)
with FSDP.summon_full_params(fsdp_overlap):
fsdp_overlap_prev_params = [
(n, p.clone()) for n, p in fsdp_overlap.named_parameters()
]
for i in range(6):
inp = torch.randn(2, 2, device=device_type)
with torch.no_grad():
inp_clone = inp.clone()
fsdp(inp, inp).sum().backward()
fsdp_overlap(inp_clone, inp_clone).sum().backward()
optim.step()
optim.zero_grad()
# Overlapped optimizer FSDP module should have sharded_grad as None.
for fsdp_unit in FSDP.fsdp_modules(fsdp_overlap):
handle = fsdp_unit._handle
if handle:
handle_grad = handle.sharded_grad
self.assertEqual(
None,
handle_grad,
"Overlapped FSDP sharded_grad is not None!",
)
# Note: FSDP without optimizer overlap won't set sharded_grad to None until the next
# pre-forward since it needs to run FSDP specific logic that picks up that set_to_none=True
# has been called (or that the gradients have been otherwise set to None)
# Verify parameters are different than prev iteration
with FSDP.summon_full_params(fsdp_overlap, with_grads=True):
for (n, p), (n_prev, p_prev) in zip(
fsdp_overlap.named_parameters(), fsdp_overlap_prev_params
):
self.assertEqual(n, n_prev)
self.assertNotEqual(
p,
p_prev,
f"{n_prev} Params at iter {i} same as previous iter!",
)
# Verify overlap and non overlapped are the same
with FSDP.summon_full_params(fsdp_overlap):
with FSDP.summon_full_params(fsdp):
for (n_overlap, p_overlap), (n, p) in zip(
fsdp_overlap.named_parameters(), fsdp.named_parameters()
):
self.assertEqual(n_overlap, n)
self.assertEqual(
p,
p_overlap,
f"Rank {self.rank}: Params not equal at iteration {i}: {n_overlap} - {p} vs {p_overlap}",
)
self.assertEqual(
None, p.grad, f"Expected param {n} grad to be None"
)
self.assertEqual(
None,
p_overlap.grad,
f"Expected param {n_overlap} grad to be None",
)
fsdp_overlap_prev_params = [
(n, p.clone()) for n, p in fsdp_overlap.named_parameters()
]
@skip_if_lt_x_gpu(2)
def test_fsdp_cpu_training(self):
"""Tests FSDP training on CPU."""
gloo_pg = dist.new_group(backend="gloo")
for ss in [
ShardingStrategy.NO_SHARD,
ShardingStrategy.FULL_SHARD,
ShardingStrategy.SHARD_GRAD_OP,
]:
torch.manual_seed(42)
model = MyModel()
ref_model = DDP(deepcopy(model), process_group=gloo_pg)
model = FSDP(
model,
sharding_strategy=ss,
auto_wrap_policy=always_wrap_policy,
process_group=gloo_pg,
device_id=torch.device("cpu"),
)
ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
optim = torch.optim.Adam(model.parameters(), lr=1e-2)
torch.manual_seed(42 + self.rank)
inp = torch.randn(2, 2)
for _ in range(10):
losses = []
for _model, _optim in ((ref_model, ref_optim), (model, optim)):
loss = _model(inp, inp).sum()
losses.append(loss)
loss.backward()
_optim.step()
_optim.zero_grad()
self.assertEqual(losses[0], losses[1])
@skip_if_lt_x_gpu(2)
def test_fsdp_cpu_init_stays_on_cpu(self):
# Move me to MT test once warning logging and backward collective issue
# is resolved.
"""Tests that passing a CPU module to FSDP preserves that the wrapped
module is on CPU after FSDP initialization, albeit after logging a
warning, and that FSDP moves CPU input to GPU before the forward."""
torch.accelerator.set_device_index(self.rank)
regex = "passed-in `module` is on CPU"
context = self.assertWarnsRegex(
expected_warning=UserWarning, expected_regex=regex
)
with context:
nested_wrapped_module = NestedWrappedModule.init(
self.process_group,
FSDPInitMode.RECURSIVE,
DEVICEInitMode.DEVICE_NEVER,
)
fsdp_model = FSDP(nested_wrapped_module, self.process_group)
devices = {p.device for p in fsdp_model.parameters()}
self.assertEqual(1, len(devices))
self.assertEqual(torch.device("cpu"), devices.pop())
fsdp_model = fsdp_model.to(device=device_type)
# Ensure fwd + backward can be performed after moving to CUDA.
# CPU input also tests that input is correctly moved to appropriate
# CUDA device.
inp = fsdp_model.module.get_input(device=torch.device("cpu"))
fsdp_model(*inp).sum().backward()
@skip_if_lt_x_gpu(2)
def test_cpu_init_with_sync_module_states(self):
"""
Tests that passing ``sync_module_states=True`` raises an error for
a CPU module since the synchronization requires GPU communication,
while additionally passing ``device_id`` does not raise an error, even
when the model has CPU buffers.
"""
def init_nested_wrapped_module():
return NestedWrappedModule.init(
self.process_group,
FSDPInitMode.NO_FSDP,
DEVICEInitMode.DEVICE_NEVER,
)
with self.assertRaisesRegex(
ValueError,
"The module has CPU parameters or buffers when `sync_module_states=True`",
):
FSDP(
init_nested_wrapped_module(),
self.process_group,
sync_module_states=True,
)
# Check that `device_id` with `sync_module_states=True` works
nested_wrapped_module = init_nested_wrapped_module()
nested_wrapped_module.buf = nn.Buffer(
torch.ones((2, 2), device="cpu") * self.rank
)
nested_wrapped_module.module[0].buf = nn.Buffer(
torch.ones((3, 2), device="cpu") * self.rank
)
nested_wrapped_module = FSDP(
nested_wrapped_module,
self.process_group,
auto_wrap_policy=ModuleWrapPolicy({nn.Linear}),
device_id=torch.accelerator.current_device_index(),
sync_module_states=True,
)
# Each rank's buffers should be 0s since rank 0 is the source, and they
# should be on GPU since we specified `device_id`
self.assertEqual(
nested_wrapped_module.buf.device,
torch.device(device_type, torch.accelerator.current_device_index()),
)
self.assertEqual(nested_wrapped_module.buf, torch.zeros((2, 2)))
self.assertEqual(
nested_wrapped_module.module.module[0].buf.device,
torch.device(device_type, torch.accelerator.current_device_index()),
)
self.assertEqual(
nested_wrapped_module.module.module[0].buf, torch.zeros((3, 2))
)
| TestFSDPMiscMultiProcess |
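A minimal sketch of the `device_id` behavior exercised by `test_fsdp_device_id` above, assuming a process group is already initialized (e.g. via torchrun):

import torch
import torch.nn as nn
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

# Wrapping a CPU-resident module with an explicit device_id moves its
# parameters onto that device during FSDP initialization.
model = nn.Linear(8, 8)  # starts on CPU
fsdp_model = FSDP(model, device_id=torch.cuda.current_device())
assert all(p.device.type == "cuda" for p in fsdp_model.parameters())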
python | pypa__hatch | tests/backend/metadata/test_core.py | {
"start": 46010,
"end": 49808
} | class ____:
def test_dynamic(self, isolation):
metadata = ProjectMetadata(
str(isolation), None, {"project": {"dependencies": 9000, "dynamic": ["dependencies"]}}
)
with pytest.raises(
ValueError,
match=(
"Metadata field `dependencies` cannot be both statically defined and listed in field `project.dynamic`"
),
):
_ = metadata.core.dependencies
def test_not_array(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"dependencies": 10}})
with pytest.raises(TypeError, match="Field `project.dependencies` must be an array"):
_ = metadata.core.dependencies
def test_entry_not_string(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"dependencies": [10]}})
with pytest.raises(TypeError, match="Dependency #1 of field `project.dependencies` must be a string"):
_ = metadata.core.dependencies
def test_invalid(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"dependencies": ["foo^1"]}})
with pytest.raises(ValueError, match="Dependency #1 of field `project.dependencies` is invalid: .+"):
_ = metadata.core.dependencies
def test_direct_reference(self, isolation):
metadata = ProjectMetadata(
str(isolation), None, {"project": {"dependencies": ["proj @ git+https://github.com/org/proj.git@v1"]}}
)
with pytest.raises(
ValueError,
match=(
"Dependency #1 of field `project.dependencies` cannot be a direct reference unless "
"field `tool.hatch.metadata.allow-direct-references` is set to `true`"
),
):
_ = metadata.core.dependencies
def test_direct_reference_allowed(self, isolation):
metadata = ProjectMetadata(
str(isolation),
None,
{
"project": {"dependencies": ["proj @ git+https://github.com/org/proj.git@v1"]},
"tool": {"hatch": {"metadata": {"allow-direct-references": True}}},
},
)
assert metadata.core.dependencies == ["proj@ git+https://github.com/org/proj.git@v1"]
def test_context_formatting(self, isolation, uri_slash_prefix):
metadata = ProjectMetadata(
str(isolation),
None,
{
"project": {"dependencies": ["proj @ {root:uri}"]},
"tool": {"hatch": {"metadata": {"allow-direct-references": True}}},
},
)
normalized_path = str(isolation).replace("\\", "/")
assert metadata.core.dependencies == [f"proj@ file:{uri_slash_prefix}{normalized_path}"]
def test_correct(self, isolation):
metadata = ProjectMetadata(
str(isolation),
None,
{
"project": {
"dependencies": [
'python___dateutil;platform_python_implementation=="CPython"',
"bAr.Baz[TLS, Zu.Bat, EdDSA, Zu_Bat] >=1.2RC5 , <9000B1",
'Foo;python_version<"3.8"',
'fOO; python_version< "3.8"',
],
},
},
)
assert (
metadata.core.dependencies
== metadata.core.dependencies
== [
"bar-baz[eddsa,tls,zu-bat]<9000b1,>=1.2rc5",
"foo; python_version < '3.8'",
"python-dateutil; platform_python_implementation == 'CPython'",
]
)
assert metadata.core.dependencies_complex is metadata.core.dependencies_complex
| TestDependencies |
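The normalization asserted in `test_correct` follows PEP 503/508 canonicalization, which can be illustrated with `packaging` (a library hatchling builds on):

from packaging.requirements import Requirement
from packaging.utils import canonicalize_name

req = Requirement("bAr.Baz[TLS, Zu.Bat, EdDSA, Zu_Bat] >=1.2RC5 , <9000B1")
# Names and extras are case-insensitive, and runs of "-", "_", "." are
# equivalent, so the variants collapse to one canonical, de-duplicated form.
assert canonicalize_name(req.name) == "bar-baz"
assert sorted(canonicalize_name(e) for e in req.extras) == ["eddsa", "tls", "zu-bat"]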
python | google__jax | tests/sparse_test.py | {
"start": 31510,
"end": 43926
} | class ____(sptu.SparseTestCase):
@parameterized.named_parameters(
{"testcase_name": f"_{cls.__name__}", "cls": cls}
for cls in [sparse.CSR, sparse.CSC, sparse.COO, sparse.BCOO, sparse.BCSR])
def test_pytree_flattening(self, cls):
sparse_format = cls.__name__.lower()
M = sparse.empty((2, 4), sparse_format=sparse_format)
self.assertIsInstance(M, cls)
buffers, tree = jax.tree.flatten(M)
self.assertTrue(all(isinstance(buffer, jax.Array) for buffer in buffers))
M_out = jax.tree.unflatten(tree, buffers)
self.assertEqual(M.dtype, M_out.dtype)
self.assertEqual(M.shape, M_out.shape)
self.assertEqual(M.nse, M_out.nse)
@parameterized.named_parameters(
{"testcase_name": f"_{cls.__name__}", "cls": cls}
for cls in [sparse.BCOO, sparse.BCSR])
def test_vmappable(self, cls):
# Note: test should avoid dependence on batching rules of BCOO/BCSR primitives
M = jnp.arange(24).reshape((2, 3, 4))
Msp = cls.fromdense(M, n_batch=1)
def from_elt(x):
assert x.ndim == 2
return sparse.empty(x.shape, x.dtype, sparse_format=cls.__name__.lower())
with self.subTest('from_elt'):
M_out = vmap(from_elt)(M)
self.assertIsInstance(M_out, cls)
self.assertEqual(M_out.n_batch, 1)
self.assertEqual(M.shape, M_out.shape)
def to_elt(x):
assert x.ndim == 2
assert x.n_sparse == 2
return jnp.empty(x.shape, x.dtype)
with self.subTest('to_elt'):
M_out = vmap(to_elt)(Msp)
self.assertIsInstance(M_out, jax.Array)
self.assertEqual(Msp.shape, M_out.shape)
with self.subTest('axis_None'):
x, y = vmap(lambda *args: args, in_axes=(0, None), out_axes=(0, None))(Msp, Msp)
self.assertIsInstance(x, cls)
self.assertEqual(x.n_batch, 1)
self.assertEqual(x.shape, Msp.shape)
self.assertEqual(x._info, Msp._info)
self.assertIsInstance(y, cls)
self.assertEqual(y.n_batch, 1)
self.assertEqual(y.shape, Msp.shape)
self.assertEqual(y._info, Msp._info)
@parameterized.named_parameters(
{"testcase_name": f"_{cls.__name__}", "cls": cls}
for cls in [sparse.CSR, sparse.CSC, sparse.COO, sparse.BCOO])
def test_jit_lower(self, cls):
sparse_format = cls.__name__.lower()
M = sparse.empty((2, 4), sparse_format=sparse_format)
self.assertIsInstance(M, cls)
jax.jit(lambda x: x).lower(M) # doesn't crash
@parameterized.named_parameters(
{"testcase_name": f"_{cls.__name__}{shape}", "cls": cls, "shape": shape}
for cls in [sparse.CSR, sparse.CSC, sparse.COO, sparse.BCOO]
for shape in ([2, 5], [5, 3]))
def test_empty(self, cls, shape):
sparse_format = cls.__name__.lower()
M = sparse.empty(shape, sparse_format=sparse_format)
self.assertIsInstance(M, cls)
self.assertEqual(M.nse, 0)
self.assertArraysEqual(M.todense(), jnp.empty(shape))
@parameterized.named_parameters(
{"testcase_name": f"_{cls.__name__}{(N, M, k)}",
"cls": cls, "N": N, "M": M, "k": k}
for cls in [sparse.CSR, sparse.CSC, sparse.COO, sparse.BCOO]
for N in [2, 5]
for M in [None, 3]
for k in [-2, 0, 1])
def test_eye(self, cls, N, M, k):
sparse_format = cls.__name__.lower()
func = partial(sparse.eye, N, M, k, sparse_format=sparse_format)
expected = jnp.eye(N, M, k)
expected_nse = jnp.count_nonzero(expected)
mat = func()
self.assertIsInstance(mat, cls)
self.assertArraysEqual(mat.todense(), expected)
self.assertEqual(mat.nse, expected_nse)
mat_jit = jit(func)()
self.assertIsInstance(mat_jit, cls)
self.assertArraysEqual(mat_jit.todense(), expected)
self.assertEqual(mat_jit.nse, expected_nse)
@parameterized.named_parameters(
{"testcase_name": f"{nse}_BCOO{shape}", "shape": shape, "nse": nse}
for shape in ([2, 5], [5, 3])
for nse in [0, 2])
def test_empty_nse(self, shape, nse=2):
M = sparse.empty(shape, nse=nse)
self.assertEqual(M.nse, nse)
self.assertArraysEqual(M.todense(), jnp.empty(shape))
@parameterized.named_parameters(
{"testcase_name": f"_{Obj.__name__}", "Obj": Obj}
for Obj in [sparse.CSR, sparse.CSC, sparse.COO, sparse.BCOO])
def test_block_until_ready(self, Obj, shape=(5, 8), dtype=np.float32):
rng = sptu.rand_sparse(self.rng(), post=Obj.fromdense)
M = rng(shape, dtype)
self.assertEqual(M.shape, M.block_until_ready().shape)
self.assertArraysEqual(M.data, M.block_until_ready().data)
self.assertArraysEqual(M.todense(), M.block_until_ready().todense())
@parameterized.named_parameters(
{"testcase_name": f"_{Obj.__name__}", "Obj": Obj}
for Obj in [jnp.array, sparse.CSR, sparse.CSC, sparse.COO, sparse.BCOO])
def test_todense(self, Obj, shape=(5, 8), dtype=np.float32):
rng = sptu.rand_sparse(self.rng())
M_dense = rng(shape, dtype)
M = jnp.array(M_dense) if Obj is jnp.array else Obj.fromdense(M_dense)
self.assertArraysEqual(sparse.todense(M), M_dense)
self.assertArraysEqual(jit(sparse.todense)(M), M_dense)
def test_todense_scalar(self):
self.assertEqual(sparse.todense(1.0), 1.0)
self.assertEqual(jit(sparse.todense)(1.0), 1.0)
@parameterized.named_parameters(
{"testcase_name": f"_{Obj.__name__}", "Obj": Obj}
for Obj in [jnp.array, sparse.BCOO])
def test_todense_batching(self, Obj, shape=(5, 8), dtype=np.float32):
rng = sptu.rand_sparse(self.rng())
M_dense = rng(shape, dtype)
if Obj is sparse.BCOO:
M = sparse.BCOO.fromdense(M_dense, n_batch=1)
else:
M = jnp.asarray(M_dense)
self.assertArraysEqual(vmap(sparse.todense)(M), M_dense)
self.assertArraysEqual(jit(vmap(sparse.todense))(M), M_dense)
@parameterized.named_parameters(
{"testcase_name": f"_{Obj.__name__}", "Obj": Obj}
for Obj in [jnp.array, sparse.BCOO])
def test_todense_ad(self, Obj, shape=(3,), dtype=np.float32):
M_dense = jnp.array([1., 2., 3.])
M = M_dense if Obj is jnp.array else Obj.fromdense(M_dense)
bufs, tree = jax.tree.flatten(M)
jac = jnp.eye(M.shape[0], dtype=M.dtype)
jac1 = jax.jacfwd(lambda *bufs: sparse.todense_p.bind(*bufs, tree=tree))(*bufs)
jac2 = jax.jacrev(lambda *bufs: sparse.todense_p.bind(*bufs, tree=tree))(*bufs)
self.assertArraysEqual(jac1, jac2)
self.assertArraysEqual(jac, jac2)
@parameterized.named_parameters(
{"testcase_name": f"_{Obj.__name__}", "Obj": Obj}
for Obj in [sparse.CSR, sparse.CSC, sparse.COO, sparse.BCOO, sparse.BCSR])
def test_attrs(self, Obj, shape=(5, 8), dtype=np.float32):
rng = sptu.rand_sparse(self.rng(), post=Obj.fromdense)
M = rng(shape, dtype)
self.assertIsInstance(M, Obj)
self.assertEqual(M.shape, shape)
self.assertEqual(M.size, math.prod(shape))
self.assertEqual(M.ndim, len(shape))
self.assertEqual(M.dtype, dtype)
self.assertEqual(M.nse, (M.todense() != 0).sum())
self.assertEqual(M.data.dtype, dtype)
self.assertEqual(len(M), M.shape[0])
with self.assertRaises(TypeError):
hash(M)
if isinstance(M, sparse.CSR):
self.assertEqual(len(M.data), len(M.indices))
self.assertEqual(len(M.indptr), M.shape[0] + 1)
elif isinstance(M, sparse.CSC):
self.assertEqual(len(M.data), len(M.indices))
self.assertEqual(len(M.indptr), M.shape[1] + 1)
elif isinstance(M, sparse.COO):
self.assertEqual(len(M.data), len(M.row))
self.assertEqual(len(M.data), len(M.col))
elif isinstance(M, sparse.BCOO):
self.assertEqual(M.data.shape[M.n_batch], M.indices.shape[-2])
self.assertEqual(M.indices.shape[-1], M.n_sparse)
elif isinstance(M, sparse.BCSR):
self.assertEqual(M.data.shape[M.n_batch], M.indices.shape[-1])
self.assertEqual(M.indptr.shape[-1], M.shape[M.n_batch] + 1)
else:
raise ValueError(f"{Obj=} not expected.")
@parameterized.parameters(itertools.chain.from_iterable(
jtu.sample_product_testcases(
Obj=[Obj],
shape=[(5, 8), (8, 5), (5, 5), (8, 8)],
dtype=jtu.dtypes.floating + jtu.dtypes.complex,
)
for Obj in [sparse.CSR, sparse.CSC, sparse.COO, sparse.BCOO]))
def test_dense_round_trip(self, shape, dtype, Obj):
rng = sptu.rand_sparse(self.rng())
M = rng(shape, dtype)
Msparse = Obj.fromdense(M)
self.assertArraysEqual(M, Msparse.todense())
@parameterized.parameters(itertools.chain.from_iterable(
jtu.sample_product_testcases(
Obj=[Obj],
shape=[(5, 8), (8, 5), (5, 5), (8, 8)],
dtype=jtu.dtypes.floating + jtu.dtypes.complex,
)
for Obj in [sparse.CSR, sparse.CSC, sparse.COO, sparse.BCOO]))
def test_transpose(self, shape, dtype, Obj):
rng = sptu.rand_sparse(self.rng())
M = rng(shape, dtype)
Msparse = Obj.fromdense(M)
self.assertArraysEqual(M.T, Msparse.T.todense())
@parameterized.parameters(itertools.chain.from_iterable(
jtu.sample_product_testcases(
[dict(shape=shape, bshape=bshape)
for shape in [(5, 8), (8, 5), (5, 5), (8, 8)]
for bshape in [shape[-1:] + s for s in [(), (3,), (4,)]]
],
Obj=[Obj],
dtype=jtu.dtypes.floating + jtu.dtypes.complex,
)
for Obj in [sparse.CSR, sparse.CSC, sparse.COO, sparse.BCOO]))
@jax.default_matmul_precision("float32")
def test_matmul(self, shape, dtype, Obj, bshape):
rng = sptu.rand_sparse(self.rng(), post=jnp.array)
rng_b = jtu.rand_default(self.rng())
M = rng(shape, dtype)
Msp = Obj.fromdense(M)
# Test matching type
x = rng_b(bshape, dtype)
x = jnp.asarray(x)
self.assertAllClose(
M @ x, Msp @ x, rtol=sptu.MATMUL_TOL, atol=sptu.MATMUL_TOL
)
# Test mismatched type
x = rng_b(bshape, np.int32)
x = jnp.asarray(x)
with jax.numpy_dtype_promotion('standard'):
self.assertAllClose(M @ x, Msp @ x, rtol=sptu.MATMUL_TOL)
@jtu.sample_product(
cls=[sparse.BCOO, sparse.BCSR],
input_type=[scipy.sparse.coo_matrix, scipy.sparse.csr_matrix,
scipy.sparse.csc_matrix],
shape=[(5, 8), (8, 5), (5, 5), (8, 8)],
dtype=jtu.dtypes.floating + jtu.dtypes.complex,
)
def test_bcoo_bcsr_from_scipy_sparse(self, cls, input_type, shape, dtype):
"""Test BCOO and BCSR from_scipy_sparse."""
rng = sptu.rand_sparse(self.rng())
M = rng(shape, dtype)
M_scipy = input_type(M)
M_jax = cls.from_scipy_sparse(M_scipy)
self.assertArraysEqual(M, M_jax.todense())
def test_bcoo_methods(self):
M = jnp.arange(12).reshape(3, 4)
Msp = sparse.BCOO.fromdense(M)
self.assertArraysEqual(-M, (-Msp).todense())
self.assertArraysEqual(2 * M, (2 * Msp).todense())
self.assertArraysEqual(M * 2, (Msp * 2).todense())
self.assertArraysEqual(M + M, (Msp + Msp).todense())
self.assertArraysEqual(M.sum(0), Msp.sum(0).todense())
self.assertArraysEqual(M.sum(1), Msp.sum(1).todense())
self.assertArraysEqual(M.sum(), Msp.sum())
self.assertArraysEqual(M.astype(float), Msp.astype(float).todense())
@jtu.sample_product(
[dict(shape=shape, n_batch=n_batch)
for shape in [(5, 8), (8, 5), (3, 4, 5), (3, 4, 3, 2)]
for n_batch in range(len(shape) - 1)
],
dtype=jtu.dtypes.floating + jtu.dtypes.complex,
)
def test_bcoo_to_bcsr_round_trip(self, shape, dtype, n_batch):
rng = sptu.rand_sparse(self.rng())
M = rng(shape, dtype)
n_dense = len(shape) - 2 - n_batch
nse = sparse.util._count_stored_elements(M, n_batch=n_batch,
n_dense=n_dense)
_, bcoo_indices = sparse_bcoo._bcoo_fromdense(M, nse=nse, n_batch=n_batch,
n_dense=n_dense)
bcoo_to_bcsr = partial(
sparse_bcsr._bcoo_to_bcsr, shape=shape, index_dtype=bcoo_indices.dtype
)
args_maker_bcoo_to_bcsr = lambda: [bcoo_indices]
self._CompileAndCheck(bcoo_to_bcsr, args_maker_bcoo_to_bcsr)
bcsr_indices, indptr = bcoo_to_bcsr(bcoo_indices)
self.assertEqual(bcsr_indices.dtype, jnp.int32)
self.assertEqual(bcsr_indices.shape, shape[:n_batch] + (nse,))
self.assertEqual(indptr.dtype, jnp.int32)
self.assertEqual(indptr.shape, shape[:n_batch] + (shape[n_batch] + 1,))
bcsr_to_bcoo = partial(sparse_bcsr._bcsr_to_bcoo, shape=shape)
self.assertArraysEqual(bcoo_indices, bcsr_to_bcoo(bcsr_indices, indptr))
args_maker_bcsr_to_bcoo = lambda: [bcsr_indices, indptr]
self._CompileAndCheck(bcsr_to_bcoo, args_maker_bcsr_to_bcoo)
| SparseObjectTest |
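A standalone sketch of the BCOO round trip and matmul behaviour the tests above verify:

import jax.numpy as jnp
from jax.experimental import sparse

M = jnp.array([[0., 1., 0.],
               [2., 0., 3.]])
Msp = sparse.BCOO.fromdense(M)  # stores only the three nonzeros
assert Msp.nse == 3
assert jnp.array_equal(Msp.todense(), M)

x = jnp.ones(3)
# Sparse @ dense dispatches to a sparse matmul and matches the dense result.
assert jnp.allclose(Msp @ x, M @ x)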
python | tensorflow__tensorflow | tensorflow/python/distribute/collective_all_reduce_strategy_test.py | {
"start": 18243,
"end": 27234
} | class ____(
CollectiveAllReduceStrategyTestBase, strategy_test_lib.DistributionTestBase,
strategy_test_lib.TwoDeviceDistributionTestBase, parameterized.TestCase):
@combinations.generate(combinations.combine(mode=['eager']))
def testStrategyInitializationError(self):
with self.assertRaisesRegex(
ValueError,
'cluster_resolver and devices cannot be set at the same time'):
_ = collective_all_reduce_strategy.CollectiveAllReduceExtended(
container_strategy=None,
cluster_resolver=multi_worker_test_base.create_in_process_cluster(
num_workers=3, num_ps=0),
communication_options=collective_util.Options(),
devices=['GPU:0', 'GPU:1'])
@combinations.generate(
combinations.combine(
mode=['graph', 'eager'],
required_gpus=[0, 1, 2],
use_devices_arg=[True, False]))
def testMinimizeLoss(self, required_gpus, use_devices_arg):
# Collective ops doesn't support strategy with one device.
if context.executing_eagerly():
strategy, _ = self._get_test_object(
None, None, required_gpus, use_devices_arg=use_devices_arg)
self._test_minimize_loss_eager(strategy)
else:
self._test_minimize_loss_graph(None, None, required_gpus)
@combinations.generate(
combinations.combine(
mode=['eager'], required_gpus=[1, 2], use_devices_arg=[True, False]))
def testNumReplicasInSync(self, required_gpus, use_devices_arg):
strategy, _ = self._get_test_object(
None, None, required_gpus, use_devices_arg=use_devices_arg)
self.assertEqual(required_gpus, strategy.num_replicas_in_sync)
@combinations.generate(
combinations.combine(
mode=['eager'],
required_tpus=[0, 1, 2],
use_devices_arg=[True, False]))
def testMinimizeLossTPU(self, required_tpus, use_devices_arg):
strategy, _ = self._get_test_object(
None, None, num_tpus=required_tpus, use_devices_arg=use_devices_arg)
self._test_minimize_loss_eager(strategy)
@combinations.generate(
combinations.combine(
mode=['graph', 'eager'],
required_gpus=[0, 1, 2],
use_devices_arg=[True, False]))
def testCallAndMergeExceptions(self, required_gpus, use_devices_arg):
strategy, _ = self._get_test_object(
None, None, num_gpus=required_gpus, use_devices_arg=use_devices_arg)
self._test_call_and_merge_exceptions(strategy)
@combinations.generate(
combinations.combine(
mode=['graph'],
required_gpus=2,
use_dataset=[True, False],
use_devices_arg=[True, False]))
def testMakeInputFnIterator(self, required_gpus, use_dataset,
use_devices_arg):
if use_dataset:
fn = lambda: dataset_ops.Dataset.range(5 * required_gpus)
else:
def fn():
dataset = dataset_ops.Dataset.range(5 * required_gpus)
it = dataset_ops.make_one_shot_iterator(dataset)
return it.get_next
expected_values = [
range(i, i + required_gpus) for i in range(0, 10, required_gpus)
]
input_fn = self._input_fn_to_test_input_context(
fn,
expected_num_replicas_in_sync=required_gpus,
expected_num_input_pipelines=1,
expected_input_pipeline_id=0)
self._test_input_fn_iterator(
None,
None,
required_gpus,
input_fn,
expected_values,
test_reinitialize=use_dataset,
ignore_order=not use_dataset)
@combinations.generate(
combinations.combine(
mode=['graph', 'eager'],
required_gpus=[0, 1, 2],
use_devices_arg=[True, False]))
def testReduceToCpu(self, required_gpus, use_devices_arg):
strategy, _ = self._get_test_object(
None, None, required_gpus, use_devices_arg=use_devices_arg)
with strategy.scope():
result = strategy.extended.call_for_each_replica(_replica_id_f32)
reduced = strategy.reduce(reduce_util.ReduceOp.SUM, result, axis=None)
expected = sum(range(strategy.num_replicas_in_sync))
self.assertEqual(expected, self.evaluate(reduced))
@combinations.generate(
combinations.combine(
mode=['graph'], required_gpus=2, use_devices_arg=[True, False]))
def testAllReduceSum(self, required_gpus, use_devices_arg):
distribution, target = self._get_test_object(
None, None, num_gpus=required_gpus, use_devices_arg=use_devices_arg)
with self.cached_session(target=target):
self._test_all_reduce_sum(distribution)
@combinations.generate(
combinations.combine(
mode=['graph'], required_gpus=2, use_devices_arg=[True, False]))
def testAllReduceSumGradients(self, required_gpus, use_devices_arg):
distribution, target = self._get_test_object(
None, None, num_gpus=required_gpus, use_devices_arg=use_devices_arg)
with self.cached_session(target=target):
self._test_all_reduce_sum_gradients(distribution)
@combinations.generate(
combinations.combine(
mode=['graph'], required_gpus=2, use_devices_arg=[True, False]))
def testAllReduceSumGradientTape(self, required_gpus, use_devices_arg):
distribution, target = self._get_test_object(
None, None, num_gpus=required_gpus, use_devices_arg=use_devices_arg)
with self.cached_session(target=target):
self._test_all_reduce_sum_gradient_tape(distribution)
@combinations.generate(
combinations.combine(
mode=['graph'], required_gpus=2, use_devices_arg=[True, False]))
def testAllReduceMean(self, required_gpus, use_devices_arg):
distribution, target = self._get_test_object(
None, None, num_gpus=required_gpus, use_devices_arg=use_devices_arg)
with self.cached_session(target=target):
self._test_all_reduce_mean(distribution)
@combinations.generate(
combinations.combine(
mode=['graph'], required_gpus=2, use_devices_arg=[True, False]))
def testAllReduceMeanGradients(self, required_gpus, use_devices_arg):
distribution, target = self._get_test_object(
None, None, num_gpus=required_gpus, use_devices_arg=use_devices_arg)
with self.cached_session(target=target):
self._test_all_reduce_mean_gradients(distribution)
@combinations.generate(
combinations.combine(
mode=['graph'], required_gpus=2, use_devices_arg=[True, False]))
def testAllReduceMeanGradientTape(self, required_gpus, use_devices_arg):
distribution, target = self._get_test_object(
None, None, num_gpus=required_gpus, use_devices_arg=use_devices_arg)
with self.cached_session(target=target):
self._test_all_reduce_mean_gradient_tape(distribution)
@combinations.generate(
combinations.combine(
mode=['graph'], required_gpus=2, use_devices_arg=[True, False]))
def testNumpyDataset(self, required_gpus, use_devices_arg):
strategy, target = self._get_test_object(
None, None, num_gpus=required_gpus, use_devices_arg=use_devices_arg)
self._test_numpy_dataset(
strategy, session=self.cached_session(target=target))
@combinations.generate(
combinations.combine(
mode=['eager'], required_gpus=2, use_devices_arg=[True, False]))
def testReplicateDataset(self, required_gpus, use_devices_arg):
strategy, _ = self._get_test_object(
None, None, num_gpus=required_gpus, use_devices_arg=use_devices_arg)
dataset_fn = lambda: dataset_ops.Dataset.range(10)
expected_values = [[i, i + 1] for i in range(0, 10, 2)]
input_fn = self._input_fn_to_test_input_context(
dataset_fn,
expected_num_replicas_in_sync=required_gpus,
expected_num_input_pipelines=1,
expected_input_pipeline_id=0)
self._test_input_fn_iterable(strategy, input_fn, expected_values)
@combinations.generate(
combinations.combine(mode=['graph'], use_devices_arg=[True, False]))
def testDeepCopy(self, use_devices_arg):
distribution, _ = self._get_test_object(
None, None, use_devices_arg=use_devices_arg)
copy.deepcopy(distribution)
@combinations.generate(
combinations.combine(
mode=['graph', 'eager'],
required_gpus=[0, 1, 2],
use_devices_arg=[True, False]))
def testSummaryForReplicaZeroOnly(self, required_gpus, use_devices_arg):
strategy, target = self._get_test_object(
None, None, num_gpus=required_gpus, use_devices_arg=use_devices_arg)
with self.cached_session(target=target):
self._test_summary_for_replica_zero_only(strategy)
@combinations.generate(
combinations.combine(
mode=['graph', 'eager'],
required_gpus=[0, 1, 2],
use_devices_arg=[True, False]))
def testTrainableVariables(self, required_gpus, use_devices_arg):
strategy, _ = self._get_test_object(
None, None, num_gpus=required_gpus, use_devices_arg=use_devices_arg)
self._test_trainable_variable(strategy)
| SingleWorkerCollectiveAllReduceStrategy |
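The strategy exercised above is exposed publicly as `tf.distribute.MultiWorkerMirroredStrategy`; a minimal sketch (runs single-worker unless TF_CONFIG describes a cluster):

import tensorflow as tf

strategy = tf.distribute.MultiWorkerMirroredStrategy()
with strategy.scope():
    # Variables created in scope are mirrored and kept in sync via
    # collective all-reduce across the workers in the cluster.
    model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
    model.compile(optimizer="sgd", loss="mse")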
python | google__pytype | pytype/load_pytd_test.py | {
"start": 31711,
"end": 37052
} | class ____(test_base.UnitTest):
def _create_files(self, tempdir):
src = """
import module2
from typing import List
constant = True
x = List[int]
b = List[int]
class SomeClass:
def __init__(self, a: module2.ObjectMod2):
pass
def ModuleFunction():
pass
"""
tempdir.create_file("module1.pyi", src)
tempdir.create_file(
"module2.pyi",
"""
class ObjectMod2:
def __init__(self):
pass
""",
)
def _get_path(self, tempdir, filename):
return path_utils.join(tempdir.path, filename)
def _load_ast(self, tempdir, module):
loader = load_pytd.Loader(
config.Options.create(
module_name=module.module_name,
python_version=self.python_version,
pythonpath=tempdir.path,
)
)
return loader, loader.load_file(
module.module_name, self._get_path(tempdir, module.file_name)
)
def _pickle_modules(self, loader, tempdir, *modules):
for module in modules:
pickle_utils.SerializeAndSave(
loader._modules[module.module_name].ast,
self._get_path(tempdir, module.file_name + ".pickled"),
)
def _load_pickled_module(self, tempdir, module):
pickle_loader = load_pytd.PickledPyiLoader(
config.Options.create(
python_version=self.python_version, pythonpath=tempdir.path
)
)
return pickle_loader.load_file(
module.module_name, self._get_path(tempdir, module.file_name)
)
def test_load_with_same_module_name(self):
with test_utils.Tempdir() as d:
self._create_files(tempdir=d)
module1 = _Module(module_name="foo.bar.module1", file_name="module1.pyi")
module2 = _Module(module_name="module2", file_name="module2.pyi")
loader, ast = self._load_ast(tempdir=d, module=module1)
self._pickle_modules(loader, d, module1, module2)
pickled_ast_filename = self._get_path(d, module1.file_name + ".pickled")
result = pickle_utils.SerializeAndSave(ast, pickled_ast_filename)
self.assertIsNone(result)
loaded_ast = self._load_pickled_module(d, module1)
self.assertTrue(loaded_ast)
self.assertIsNot(loaded_ast, ast)
self.assertTrue(pytd_utils.ASTeq(ast, loaded_ast))
loaded_ast.Visit(visitors.VerifyLookup())
def test_star_import(self):
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", "class A: ...")
d.create_file("bar.pyi", "from foo import *")
foo = _Module(module_name="foo", file_name="foo.pyi")
bar = _Module(module_name="bar", file_name="bar.pyi")
loader, _ = self._load_ast(d, module=bar)
self._pickle_modules(loader, d, foo, bar)
loaded_ast = self._load_pickled_module(d, bar)
loaded_ast.Visit(visitors.VerifyLookup())
self.assertEqual(pytd_utils.Print(loaded_ast), "from foo import A")
def test_function_alias(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def f(): ...
g = f
""",
)
foo = _Module(module_name="foo", file_name="foo.pyi")
loader, _ = self._load_ast(d, module=foo)
self._pickle_modules(loader, d, foo)
loaded_ast = self._load_pickled_module(d, foo)
g = loaded_ast.Lookup("foo.g")
self.assertEqual(g.type, loaded_ast.Lookup("foo.f"))
def test_package_relative_import(self):
with test_utils.Tempdir() as d:
d.create_file(file_utils.replace_separator("pkg/foo.pyi"), "class X: ...")
d.create_file(
file_utils.replace_separator("pkg/bar.pyi"),
"""
from .foo import X
y = ... # type: X""",
)
foo = _Module(
module_name="pkg.foo",
file_name=file_utils.replace_separator("pkg/foo.pyi"),
)
bar = _Module(
module_name="pkg.bar",
file_name=file_utils.replace_separator("pkg/bar.pyi"),
)
loader, _ = self._load_ast(d, module=bar)
self._pickle_modules(loader, d, foo, bar)
loaded_ast = self._load_pickled_module(d, bar)
loaded_ast.Visit(visitors.VerifyLookup())
def test_pickled_builtins(self):
with test_utils.Tempdir() as d:
filename = d.create_file("builtins.pickle")
foo_path = d.create_file(
"foo.pickle",
"""
import datetime
tz = ... # type: datetime.tzinfo
""",
)
# save builtins
load_pytd.Loader(
config.Options.create(
module_name="base", python_version=self.python_version
)
).save_to_pickle(filename)
# load builtins
loader = load_pytd.PickledPyiLoader.load_from_pickle(
filename,
config.Options.create(
module_name="base",
python_version=self.python_version,
pythonpath="",
),
)
loader.options.tweak(
imports_map=imports_map.ImportsMap(items={"foo": foo_path})
)
# test import
self.assertTrue(loader.import_name("sys"))
self.assertTrue(loader.import_name("__future__"))
self.assertTrue(loader.import_name("datetime"))
self.assertTrue(loader.import_name("foo"))
self.assertTrue(loader.import_name("ctypes"))
| PickledPyiLoaderTest |
python | getsentry__sentry | src/sentry/integrations/services/repository/impl.py | {
"start": 776,
"end": 6381
} | class ____(RepositoryService):
def serialize_repository(
self,
*,
organization_id: int,
id: int,
as_user: RpcUser | None = None,
) -> Any | None:
repository = Repository.objects.filter(id=id).first()
if repository is None:
return None
return serialize(repository, user=as_user)
def get_repositories(
self,
*,
organization_id: int,
integration_id: int | None = None,
external_id: int | None = None,
providers: list[str] | None = None,
has_integration: bool | None = None,
has_provider: bool | None = None,
status: int | None = None,
) -> list[RpcRepository]:
query = Repository.objects.filter(organization_id=organization_id)
if integration_id is not None:
query = query.filter(integration_id=integration_id)
if external_id is not None:
query = query.filter(external_id=external_id)
if providers is not None:
query = query.filter(provider__in=providers)
if has_integration is not None:
query = query.filter(integration_id__isnull=not has_integration)
if has_provider is not None:
query = query.filter(provider__isnull=not has_provider)
if status is not None:
query = query.filter(status=status)
return [serialize_repository(repo) for repo in query]
def get_repository(self, *, organization_id: int, id: int) -> RpcRepository | None:
repository = Repository.objects.filter(organization_id=organization_id, id=id).first()
if repository is None:
return None
return serialize_repository(repository)
def create_repository(
self, *, organization_id: int, create: RpcCreateRepository
) -> RpcRepository | None:
try:
with enforce_constraints(transaction.atomic(router.db_for_write(Repository))):
repository = Repository.objects.create(
organization_id=organization_id, **create.dict()
)
return serialize_repository(repository)
except IntegrityError:
return None
def update_repository(self, *, organization_id: int, update: RpcRepository) -> None:
with transaction.atomic(router.db_for_write(Repository)):
repository = Repository.objects.filter(
organization_id=organization_id, id=update.id
).first()
if repository is None:
return
update_dict = update.dict()
del update_dict["id"]
for field_name, field_value in update_dict.items():
setattr(repository, field_name, field_value)
repository.save()
def update_repositories(self, *, organization_id: int, updates: list[RpcRepository]) -> None:
if not updates:
return
update_mapping: dict[int, dict[str, Any]] = {}
for update in updates:
update_dict = update.dict()
del update_dict["id"] # don't update the repo ID
update_mapping[update.id] = update_dict
if len(update_mapping.keys()) != len(updates):
raise Exception("Multiple updates for the same repository are not allowed.")
# we could be updating everything except the repo IDs, so we need to collect the fields
fields_to_update = set(list(update_mapping.values())[0].keys())
with transaction.atomic(router.db_for_write(Repository)):
repositories = Repository.objects.filter(
organization_id=organization_id, id__in=update_mapping.keys()
)
# Apply updates to each repository object
for repository in repositories:
repo_update = update_mapping[repository.id]
for field_name, field_value in repo_update.items():
setattr(repository, field_name, field_value)
Repository.objects.bulk_update(repositories, fields=list(fields_to_update))
def disable_repositories_for_integration(
self, *, organization_id: int, integration_id: int, provider: str
) -> None:
with transaction.atomic(router.db_for_write(Repository)):
Repository.objects.filter(
organization_id=organization_id,
integration_id=integration_id,
provider=provider,
).update(status=ObjectStatus.DISABLED)
def disassociate_organization_integration(
self,
*,
organization_id: int,
organization_integration_id: int,
integration_id: int,
) -> None:
with transaction.atomic(router.db_for_write(Repository)):
# Disassociate repos from the organization integration being deleted
Repository.objects.filter(
organization_id=organization_id, integration_id=integration_id
).update(integration_id=None)
# Delete Code Owners with a Code Mapping using the OrganizationIntegration
ProjectCodeOwners.objects.filter(
repository_project_path_config__in=RepositoryProjectPathConfig.objects.filter(
organization_integration_id=organization_integration_id
).values_list("id", flat=True)
).delete()
# Delete the Code Mappings
RepositoryProjectPathConfig.objects.filter(
organization_integration_id=organization_integration_id
).delete()
| DatabaseBackedRepositoryService |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataplex.py | {
"start": 89784,
"end": 93257
} | class ____(GoogleCloudBaseOperator):
"""
Deletes an asset resource.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param lake_id: Required. The ID of the Google Cloud lake that the asset belongs to.
:param zone_id: Required. Zone identifier.
:param asset_id: Required. Asset identifier.
:param api_version: The version of the api that will be requested for example 'v3'.
:param retry: A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:return: None
"""
template_fields = (
"project_id",
"zone_id",
"asset_id",
"impersonation_chain",
)
def __init__(
self,
project_id: str,
region: str,
lake_id: str,
zone_id: str,
asset_id: str,
api_version: str = "v1",
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.lake_id = lake_id
self.zone_id = zone_id
self.asset_id = asset_id
self.api_version = api_version
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = DataplexHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Deleting Dataplex asset %s", self.asset_id)
operation = hook.delete_asset(
project_id=self.project_id,
region=self.region,
lake_id=self.lake_id,
zone_id=self.zone_id,
asset_id=self.asset_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
hook.wait_for_operation(timeout=self.timeout, operation=operation)
self.log.info("Dataplex asset %s deleted successfully!", self.asset_id)
| DataplexDeleteAssetOperator |
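A hedged sketch of wiring this operator into a DAG; every ID below is a placeholder, not a value from the source.
from airflow import DAG
from airflow.providers.google.cloud.operators.dataplex import DataplexDeleteAssetOperator
with DAG(dag_id="dataplex_cleanup", schedule=None) as dag:
    delete_asset = DataplexDeleteAssetOperator(
        task_id="delete_dataplex_asset",
        project_id="my-gcp-project",
        region="us-central1",
        lake_id="my-lake",
        zone_id="my-zone",
        asset_id="my-asset",
    )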
python | streamlit__streamlit | lib/tests/streamlit/runtime/fragment_test.py | {
"start": 3492,
"end": 18152
} | class ____(unittest.TestCase):
def setUp(self):
self.original_dg_stack = context_dg_stack.get()
root_container = MagicMock()
context_dg_stack.set(
(
DeltaGenerator(
root_container=root_container,
cursor=MagicMock(root_container=root_container),
),
)
)
def tearDown(self):
context_dg_stack.set(self.original_dg_stack)
@patch("streamlit.runtime.fragment.get_script_run_ctx", MagicMock())
def test_wrapped_fragment_calls_original_function(self):
called = False
dg_stack_len = len(context_dg_stack.get())
@fragment
def my_fragment():
nonlocal called
called = True
# Verify that a new container gets created for the contents of this
# fragment to be written to.
assert len(context_dg_stack.get()) == dg_stack_len + 1
my_fragment()
assert called
@patch("streamlit.runtime.fragment.get_script_run_ctx")
def test_resets_current_fragment_id_on_success(self, patched_get_script_run_ctx):
ctx = MagicMock()
patched_get_script_run_ctx.return_value = ctx
@fragment
def my_fragment():
assert ctx.current_fragment_id != "my_fragment_id"
ctx.current_fragment_id = "my_fragment_id"
my_fragment()
assert ctx.current_fragment_id == "my_fragment_id"
@patch("streamlit.runtime.fragment.get_script_run_ctx")
def test_resets_current_fragment_id_on_exception(self, patched_get_script_run_ctx):
ctx = MagicMock()
patched_get_script_run_ctx.return_value = ctx
exception_message = "oh no"
@fragment
def my_exploding_fragment():
assert ctx.current_fragment_id != "my_fragment_id"
raise Exception(exception_message)
ctx.current_fragment_id = "my_fragment_id"
with pytest.raises(Exception, match=exception_message):
my_exploding_fragment()
assert ctx.current_fragment_id == "my_fragment_id"
@patch("streamlit.runtime.fragment.get_script_run_ctx")
def test_wrapped_fragment_not_saved_in_FragmentStorage(
self, patched_get_script_run_ctx
):
ctx = MagicMock()
ctx.fragment_storage = MemoryFragmentStorage()
ctx.fragment_storage.set = MagicMock(wraps=ctx.fragment_storage.set)
patched_get_script_run_ctx.return_value = ctx
@fragment
def my_fragment():
pass
# Call the fragment-decorated function twice, and verify that the fragment
# is saved once per call.
my_fragment()
my_fragment()
assert ctx.fragment_storage.set.call_count == 2
@patch("streamlit.runtime.fragment.get_script_run_ctx")
def test_sets_dg_stack_and_cursor_to_snapshots_if_fragment_ids_this_run(
self, patched_get_script_run_ctx
):
ctx = MagicMock()
ctx.fragment_ids_this_run = ["my_fragment_id"]
ctx.fragment_storage = MemoryFragmentStorage()
patched_get_script_run_ctx.return_value = ctx
dg = MagicMock()
dg.my_random_field = 7
context_dg_stack.set((dg,))
ctx.cursors = MagicMock()
ctx.cursors.my_other_random_field = 8
call_count = 0
@fragment
def my_fragment():
nonlocal call_count
assert ctx.current_fragment_id is not None
curr_dg_stack = context_dg_stack.get()
# Verify that mutations made in previous runs of my_fragment aren't
# persisted.
assert curr_dg_stack[0].my_random_field == 7
assert ctx.cursors.my_other_random_field == 8
# Attempt to mutate cursors and the dg_stack.
curr_dg_stack[0].my_random_field += 1
ctx.cursors.my_other_random_field += 1
call_count += 1
my_fragment()
# Reach inside our MemoryFragmentStorage internals to pull out our saved
# fragment.
saved_fragment = next(iter(ctx.fragment_storage._fragments.values()))
# Verify that we can't mutate our dg_stack from within my_fragment. If a
# mutation is persisted between fragment runs, the assert on `my_random_field`
# will fail.
saved_fragment()
saved_fragment()
# Called once when calling my_fragment and twice when calling the saved
# fragment.
assert call_count == 3
@patch("streamlit.runtime.fragment.get_script_run_ctx")
def test_sets_current_fragment_id_in_full_script_runs(
self, patched_get_script_run_ctx
):
ctx = MagicMock()
ctx.fragment_ids_this_run = []
ctx.new_fragment_ids = set()
ctx.current_fragment_id = None
ctx.fragment_storage = MemoryFragmentStorage()
patched_get_script_run_ctx.return_value = ctx
dg = MagicMock()
dg.my_random_field = 0
context_dg_stack.set((dg,))
@fragment
def my_fragment():
assert ctx.current_fragment_id is not None
curr_dg_stack = context_dg_stack.get()
curr_dg_stack[0].my_random_field += 1
assert len(ctx.new_fragment_ids) == 0
my_fragment()
# Verify that `my_fragment`'s id was added to the `new_fragment_id`s set.
assert len(ctx.new_fragment_ids) == 1
# Reach inside our MemoryFragmentStorage internals to pull out our saved
# fragment.
saved_fragment = next(iter(ctx.fragment_storage._fragments.values()))
saved_fragment()
saved_fragment()
# This time, dg should have been mutated since we don't restore it from a
# snapshot in a regular script run.
assert dg.my_random_field == 3
assert ctx.current_fragment_id is None
@parameterized.expand(
[
(None, None),
(3, 3.0),
(5.0, 5.0),
("1 minute", 60.0),
]
)
@patch("streamlit.runtime.fragment.get_script_run_ctx")
def test_run_every_arg_handling(
self,
run_every,
expected_interval,
patched_get_script_run_ctx,
):
called = False
ctx = MagicMock()
ctx.fragment_storage = MemoryFragmentStorage()
patched_get_script_run_ctx.return_value = ctx
@fragment(run_every=run_every)
def my_fragment():
nonlocal called
called = True
my_fragment()
assert called
if expected_interval is not None:
[(args, _)] = ctx.enqueue.call_args_list
msg = args[0]
assert msg.auto_rerun.interval == expected_interval
assert isinstance(msg.auto_rerun.fragment_id, str)
assert msg.auto_rerun.fragment_id != ""
else:
ctx.enqueue.assert_not_called()
@patch("streamlit.runtime.fragment.get_script_run_ctx")
def test_sets_active_script_hash_if_needed(self, patched_get_script_run_ctx):
ctx = MagicMock()
patched_run_with_active_hash = MagicMock()
ctx.run_with_active_hash = patched_run_with_active_hash
ctx.fragment_storage = MemoryFragmentStorage()
ctx.pages_manager = PagesManager("")
ctx.pages_manager.set_pages({}) # Migrate to MPAv2
ctx.active_script_hash = "some_hash"
patched_get_script_run_ctx.return_value = ctx
@fragment
def my_fragment():
pass
my_fragment()
# Reach inside our MemoryFragmentStorage internals to pull out our saved
# fragment.
saved_fragment = next(iter(ctx.fragment_storage._fragments.values()))
# set the hash to something different for subsequent calls
ctx.active_script_hash = "a_different_hash"
# Verify subsequent calls will run with the original active script hash
saved_fragment()
patched_run_with_active_hash.assert_called_with("some_hash")
patched_run_with_active_hash.reset_mock()
saved_fragment()
patched_run_with_active_hash.assert_called_with("some_hash")
@patch("streamlit.runtime.fragment.get_script_run_ctx")
def test_fragment_code_returns_value(
self,
patched_get_script_run_ctx,
):
ctx = MagicMock()
ctx.fragment_storage = MemoryFragmentStorage()
patched_get_script_run_ctx.return_value = ctx
@fragment
def my_fragment():
return 42
assert my_fragment() == 42
@patch("streamlit.runtime.fragment.get_script_run_ctx")
def test_fragment_raises_rerun_exception_in_main_execution_context(
self, patched_get_script_run_ctx
):
"""Ensure that a rerun exception raised in a fragment when executed in the main
execution context (meaning first execution in the app flow, not via a
fragment-only rerun) is raised in the main execution context.
"""
ctx = MagicMock()
ctx.fragment_storage = MemoryFragmentStorage()
patched_get_script_run_ctx.return_value = ctx
@fragment
def my_fragment():
raise RerunException(rerun_data=None)
with pytest.raises(RerunException):
my_fragment()
@parameterized.expand([(ValueError), (TypeError), (RuntimeError), (Exception)])
def test_fragment_raises_FragmentHandledException_in_full_app_run(
self, exception_type: type[Exception]
):
"""Ensures that during full-app run the exceptions are raised."""
with patch(
"streamlit.runtime.fragment.get_script_run_ctx"
) as patched_get_script_run_ctx:
ctx = MagicMock()
ctx.fragment_storage = MemoryFragmentStorage()
patched_get_script_run_ctx.return_value = ctx
@fragment
def my_fragment():
raise exception_type()
with pytest.raises(FragmentHandledException):
my_fragment()
@patch("streamlit.runtime.fragment.get_script_run_ctx")
def test_fragment_additional_hash_info_param_used_for_generating_id(
self, patched_get_script_run_ctx
):
"""Test that the internal function can be called with an
additional hash info parameter."""
ctx = MagicMock()
patched_get_script_run_ctx.return_value = ctx
def my_function():
return ctx.current_fragment_id
fragment_id1 = _fragment(my_function)()
fragment_id2 = _fragment(my_function, additional_hash_info="some_hash_info")()
assert fragment_id1 != fragment_id2
# countercheck
fragment_id2 = _fragment(my_function, additional_hash_info="")()
assert fragment_id1 == fragment_id2
# TESTS FOR WRITING TO CONTAINERS OUTSIDE AND INSIDE OF FRAGMENT
APP_FUNCTION = Callable[[ELEMENT_PRODUCER], None]
def _run_fragment_writes_to_outside_container_app(
element_producer: ELEMENT_PRODUCER,
) -> None:
"""App with container outside of fragment."""
outside_container = st.container()
@fragment
def _some_method():
st.write("Hello")
# this is forbidden
with outside_container:
element_producer()
_some_method()
def _run_fragment_writes_to_nested_outside_container_app(
element_producer: ELEMENT_PRODUCER,
) -> None:
"""App with nested container outside of fragment."""
with st.container():
outside_container = st.container()
@fragment
def _some_method():
st.write("Hello")
# this is forbidden
with outside_container:
element_producer()
_some_method()
def _run_fragment_writes_to_nested_outside_container_app2(
element_producer: ELEMENT_PRODUCER,
) -> None:
"""App with nested container outside of fragment writing from nested container."""
with st.container():
outside_container = st.container()
@fragment
def _some_method():
st.write("Hello")
# this is forbidden
with outside_container, st.container():
element_producer()
_some_method()
def _run_fragment_writes_to_nested_outside_container_app3(
element_producer: ELEMENT_PRODUCER,
) -> None:
"""App with nested container outside of fragment writing from nested container."""
with st.container():
outside_container = st.container()
@fragment
def _some_method():
st.write("Hello")
with st.container():
# this is forbidden
with outside_container:
element_producer()
_some_method()
def _run_fragment_writes_to_inside_container_app(
element_producer: ELEMENT_PRODUCER,
) -> None:
"""App with container inside of fragment."""
@fragment
def _some_method():
inside_container = st.container()
st.write("Hello")
with inside_container:
element_producer()
_some_method()
def _run_fragment_writes_to_nested_inside_container_app(
element_producer: ELEMENT_PRODUCER,
) -> None:
"""App with container inside of fragment."""
@fragment
def _some_method():
inside_container = st.container()
st.write("Hello")
with st.container(), inside_container:
element_producer()
_some_method()
outside_container_writing_apps: list[APP_FUNCTION] = [
_run_fragment_writes_to_outside_container_app,
_run_fragment_writes_to_nested_outside_container_app,
_run_fragment_writes_to_nested_outside_container_app2,
_run_fragment_writes_to_nested_outside_container_app3,
]
inside_container_writing_apps: list[APP_FUNCTION] = [
_run_fragment_writes_to_inside_container_app,
_run_fragment_writes_to_nested_inside_container_app,
]
TEST_TUPLE = tuple[str, APP_FUNCTION, ELEMENT_PRODUCER]
def get_test_tuples(
app_functions: list[APP_FUNCTION],
elements: list[tuple[str, Callable[[], DeltaGenerator]]],
) -> list[TEST_TUPLE]:
"""Create a tuple of (name, app-to-run, element-producer), so that each passed app runs with every passed element.
Parameters
----------
app_functions : list[APP_FUNCTION]
Functions that run Streamlit elements like they are an app.
elements : list[tuple[str, Callable[[], DeltaGenerator]]]
Tuples of (name, element-producer) where name describes the produced element and element_producer
is a function that executes a Streamlit element.
"""
return [
(_element_producer[0], _app, _element_producer[1])
for _app in app_functions
for _element_producer in elements
]
| FragmentTest |
python | takluyver__flit | flit_core/flit_core/common.py | {
"start": 214,
"end": 3560
} | class ____:
"""This represents the module/package that we are going to distribute
"""
in_namespace_package = False
namespace_package_name = None
def __init__(self, name: str, directory=Path()):
self.name = name
self.is_stub_pkg = name.endswith('-stubs')
# It must exist either as a .py file or a directory, but not both
name_as_path = name.replace('.', os.sep)
pkg_dir = directory / name_as_path
py_file = directory / (name_as_path+'.py')
src_pkg_dir = directory / 'src' / name_as_path
src_py_file = directory / 'src' / (name_as_path+'.py')
existing = set()
if pkg_dir.is_dir():
self.path = pkg_dir
self.is_package = True
self.prefix = ''
existing.add(pkg_dir)
if py_file.is_file():
self.path = py_file
self.is_package = False
self.prefix = ''
existing.add(py_file)
if src_pkg_dir.is_dir():
self.path = src_pkg_dir
self.is_package = True
self.prefix = 'src'
existing.add(src_pkg_dir)
if src_py_file.is_file():
self.path = src_py_file
self.is_package = False
self.prefix = 'src'
existing.add(src_py_file)
if len(existing) > 1:
existing_str = ", ".join(map(str, sorted(existing)))
raise ValueError(
f"Multiple files or folders could be module {name}: {existing_str}"
)
elif not existing:
raise ValueError(f"No file/folder found for module {name}")
self.source_dir = directory / self.prefix
if '.' in name:
self.namespace_package_name = name.rpartition('.')[0]
self.in_namespace_package = True
@property
def file(self):
if self.is_package:
return self.path / '__init__.py'
else:
return self.path
@property
def version_files(self):
"""Files which will be parsed to find a version number
Files later in this list take precedence over earlier ones.
"""
if self.is_package:
paths = [self.path / '__init__.py']
for filename in ('version.py', '_version.py', '__version__.py'):
if (self.path / filename).is_file():
paths.insert(0, self.path / filename)
return paths
else:
return [self.path]
def iter_files(self):
"""Iterate over the files contained in this module.
Yields absolute paths - caller may want to make them relative.
Excludes any __pycache__ and *.pyc files.
"""
def _include(path):
name = os.path.basename(path)
return name != '__pycache__' and not name.endswith('.pyc')
if self.is_package:
# Ensure we sort all files and directories so the order is stable
for dirpath, dirs, files in os.walk(str(self.path)):
for file in sorted(files):
full_path = os.path.join(dirpath, file)
if _include(full_path):
yield full_path
dirs[:] = [d for d in sorted(dirs) if _include(d)]
else:
yield str(self.path)
| Module |
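A minimal sketch of how this class resolves a layout; the package name and directory are hypothetical, and the constructor raises ValueError when no matching file or folder exists.
from pathlib import Path
# assumes myproject/src/mypkg/__init__.py exists on disk
mod = Module("mypkg", directory=Path("myproject"))
print(mod.is_package, mod.prefix)  # True "src" for a src-layout package
for path in mod.iter_files():      # yields files, skipping __pycache__ and *.pyc
    print(path)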
python | apache__airflow | task-sdk/tests/task_sdk/definitions/_internal/test_templater.py | {
"start": 1043,
"end": 4469
} | class ____:
def test_get_template_env(self):
# Test get_template_env when a Dag is provided
templater = Templater()
dag = DAG(dag_id="test_dag", schedule=None, render_template_as_native_obj=True)
env = templater.get_template_env(dag)
assert isinstance(env, jinja2.Environment)
assert not env.sandboxed
# Test get_template_env when no Dag is provided
templater = Templater()
env = templater.get_template_env()
assert isinstance(env, jinja2.Environment)
assert env.sandboxed
def test_prepare_template(self):
# Test that prepare_template is a no-op
templater = Templater()
templater.prepare_template()
def test_resolve_template_files_logs_exception(self, caplog):
templater = Templater()
templater.message = "template_file.txt"
templater.template_fields = ["message"]
templater.template_ext = [".txt"]
templater.resolve_template_files()
assert "Failed to resolve template field 'message'" in caplog.text
def test_render_object_storage_path(self):
templater = Templater()
path = ObjectStoragePath("s3://bucket/key/{{ ds }}/part")
context = {"ds": "2006-02-01"}
jinja_env = templater.get_template_env()
rendered_content = templater._render_object_storage_path(path, context, jinja_env)
assert rendered_content == ObjectStoragePath("s3://bucket/key/2006-02-01/part")
def test_render_template(self):
context = {"name": "world"}
templater = Templater()
templater.message = "Hello {{ name }}"
templater.template_fields = ["message"]
templater.template_ext = [".txt"]
rendered_content = templater.render_template(templater.message, context)
assert rendered_content == "Hello world"
def test_not_render_literal_value(self):
templater = Templater()
templater.template_ext = []
context = {}
content = LiteralValue("Hello {{ name }}")
rendered_content = templater.render_template(content, context)
assert rendered_content == "Hello {{ name }}"
def test_not_render_file_literal_value(self):
templater = Templater()
templater.template_ext = [".txt"]
context = {}
content = LiteralValue("template_file.txt")
rendered_content = templater.render_template(content, context)
assert rendered_content == "template_file.txt"
@pytest.fixture
def env():
return SandboxedEnvironment(undefined=jinja2.StrictUndefined, cache_size=0)
def test_protected_access(env):
class Test:
_protected = 123
assert env.from_string(r"{{ obj._protected }}").render(obj=Test) == "123"
def test_private_access(env):
with pytest.raises(jinja2.exceptions.SecurityError):
env.from_string(r"{{ func.__code__ }}").render(func=test_private_access)
@pytest.mark.parametrize(
("name", "expected"),
(
("ds", "2012-07-24"),
("ds_nodash", "20120724"),
("ts", "2012-07-24T03:04:52+00:00"),
("ts_nodash", "20120724T030452"),
("ts_nodash_with_tz", "20120724T030452+0000"),
),
)
def test_filters(env, name, expected):
when = datetime(2012, 7, 24, 3, 4, 52, tzinfo=timezone.utc)
result = env.from_string("{{ date |" + name + " }}").render(date=when)
assert result == expected
| TestTemplater |
python | doocs__leetcode | solution/1800-1899/1818.Minimum Absolute Sum Difference/Solution.py | {
"start": 0,
"end": 554
} | from bisect import bisect_left
from math import inf
from typing import List
class ____:
def minAbsoluteSumDiff(self, nums1: List[int], nums2: List[int]) -> int:
mod = 10**9 + 7
nums = sorted(nums1)
s = sum(abs(a - b) for a, b in zip(nums1, nums2)) % mod
mx = 0
for a, b in zip(nums1, nums2):
d1, d2 = abs(a - b), inf
i = bisect_left(nums, b)
if i < len(nums):
d2 = min(d2, abs(nums[i] - b))
if i:
d2 = min(d2, abs(nums[i - 1] - b))
mx = max(mx, d1 - d2)
return (s - mx + mod) % mod
| Solution |
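A quick check against LeetCode 1818's published sample, assuming the masked class name resolves to the target, Solution.
print(Solution().minAbsoluteSumDiff([1, 7, 5], [2, 3, 5]))
# 3: replacing nums1[1] = 7 with the existing 5 shrinks |7 - 3| to |5 - 3|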
python | keras-team__keras | keras/src/layers/pooling/global_average_pooling3d.py | {
"start": 265,
"end": 2603
} | class ____(BaseGlobalPooling):
"""Global average pooling operation for 3D data.
Args:
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape
`(batch, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your Keras
config file at `~/.keras/keras.json`. If you never set it, then it
will be `"channels_last"`.
keepdims: A boolean, whether to keep the temporal dimension or not.
If `keepdims` is `False` (default), the rank of the tensor is
reduced for spatial dimensions. If `keepdims` is `True`, the
spatial dimension are retained with length 1.
The behavior is the same as for `tf.reduce_mean` or `np.mean`.
Input shape:
- If `data_format='channels_last'`:
5D tensor with shape:
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
- If `data_format='channels_first'`:
5D tensor with shape:
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
Output shape:
- If `keepdims=False`:
2D tensor with shape `(batch_size, channels)`.
- If `keepdims=True`:
- If `data_format="channels_last"`:
5D tensor with shape `(batch_size, 1, 1, 1, channels)`
- If `data_format="channels_first"`:
5D tensor with shape `(batch_size, channels, 1, 1, 1)`
Example:
>>> x = np.random.rand(2, 4, 5, 4, 3)
>>> y = keras.layers.GlobalAveragePooling3D()(x)
>>> y.shape
(2, 3)
"""
def __init__(self, data_format=None, keepdims=False, **kwargs):
super().__init__(
pool_dimensions=3,
data_format=data_format,
keepdims=keepdims,
**kwargs,
)
def call(self, inputs):
if self.data_format == "channels_last":
return ops.mean(inputs, axis=[1, 2, 3], keepdims=self.keepdims)
return ops.mean(inputs, axis=[2, 3, 4], keepdims=self.keepdims)
| GlobalAveragePooling3D |
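The docstring example covers the default reduction; this hedged companion shows keepdims=True, with the shape following the docstring's stated contract.
import numpy as np
from keras import layers
x = np.random.rand(2, 4, 5, 4, 3)
y = layers.GlobalAveragePooling3D(keepdims=True)(x)
print(y.shape)  # (2, 1, 1, 1, 3): spatial dims retained with length 1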
python | weaviate__weaviate-python-client | journey_tests/journeys.py | {
"start": 206,
"end": 1255
} | class ____:
def __init__(self, client: WeaviateClient) -> None:
self.__client = client
@classmethod
def use(cls) -> "SyncJourneys":
return cls(connect_to_local(port=8090, grpc_port=50061))
def close(self) -> None:
self.__client.close()
def simple(self) -> List[dict]:
name = "FastAPISyncTestingCollection"
if self.__client.collections.exists(name):
self.__client.collections.delete(name)
collection = self.__client.collections.create(
name=name,
properties=[
Property(name="name", data_type=DataType.TEXT),
Property(name="age", data_type=DataType.INT),
],
)
with collection.batch.dynamic() as batch:
for i in range(1000):
batch.add_object({"name": f"Person {i}", "age": i})
res = collection.query.fetch_objects(limit=100)
self.__client.collections.delete(name)
return [cast(dict, obj.properties) for obj in res.objects]
| SyncJourneys |
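A hedged driver sketch; it assumes a local Weaviate instance reachable on the ports hard-coded in use().
journeys = SyncJourneys.use()
try:
    rows = journeys.simple()  # creates, fills, queries, then deletes the collection
    print(len(rows))          # 100, the fetch_objects limit
finally:
    journeys.close()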
python | gevent__gevent | src/greentest/3.10/test_ftplib.py | {
"start": 34349,
"end": 39285
} | class ____(TestCase):
"""Specific TLS_FTP class tests."""
def setUp(self, encoding=DEFAULT_ENCODING):
self.server = DummyTLS_FTPServer((HOST, 0), encoding=encoding)
self.server.start()
self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def test_control_connection(self):
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
def test_data_connection(self):
# clear text
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(sock.recv(1024),
LIST_DATA.encode(self.client.encoding))
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# secured, after PROT P
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIsInstance(sock, ssl.SSLSocket)
# consume from SSL socket to finalize handshake and avoid
# "SSLError [SSL] shutdown while in init"
self.assertEqual(sock.recv(1024),
LIST_DATA.encode(self.client.encoding))
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# PROT C is issued, the connection must be in cleartext again
self.client.prot_c()
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(sock.recv(1024),
LIST_DATA.encode(self.client.encoding))
self.assertEqual(self.client.voidresp(), "226 transfer complete")
def test_login(self):
# login() is supposed to implicitly secure the control connection
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.login()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
# make sure that AUTH TLS doesn't get issued again
self.client.login()
def test_auth_issued_twice(self):
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
def test_context(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
self.assertRaises(ValueError, ftplib.FTP_TLS, keyfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
keyfile=CERTFILE, context=ctx)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIs(self.client.sock.context, ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIs(sock.context, ctx)
self.assertIsInstance(sock, ssl.SSLSocket)
def test_ccc(self):
self.assertRaises(ValueError, self.client.ccc)
self.client.login(secure=True)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.ccc()
self.assertRaises(ValueError, self.client.sock.unwrap)
@skipUnless(False, "FIXME: bpo-32706")
def test_check_hostname(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.check_hostname, True)
ctx.load_verify_locations(CAFILE)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
# 127.0.0.1 doesn't match SAN
self.client.connect(self.server.host, self.server.port)
with self.assertRaises(ssl.CertificateError):
self.client.auth()
# exception quits connection
self.client.connect(self.server.host, self.server.port)
self.client.prot_p()
with self.assertRaises(ssl.CertificateError):
with self.client.transfercmd("list") as sock:
pass
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.auth()
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.prot_p()
with self.client.transfercmd("list") as sock:
pass
| TestTLS_FTPClass |
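A minimal stdlib sketch of the control/data-channel flow these tests exercise; the host is a placeholder and the server must accept anonymous logins.
from ftplib import FTP_TLS
ftps = FTP_TLS(timeout=30)
ftps.connect("ftp.example.com", 21)
ftps.login()   # secure=True by default, so AUTH TLS is issued first
ftps.prot_p()  # switch the data channel to TLS
ftps.retrlines("LIST")
ftps.quit()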
python | getsentry__sentry | src/sentry/api/serializers/models/exploresavedquery.py | {
"start": 740,
"end": 914
} | class ____(TypedDict):
orderby: list[dict[str, str]] | None
equations: list[dict[str, str | list[str]]] | None
columns: list[str]
| ExploreSavedQueryChangedReasonType |
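A hedged construction sketch; the field values are illustrative only, and all three keys are required since TypedDict defaults to total=True.
reason: ExploreSavedQueryChangedReasonType = {
    "orderby": [{"field": "timestamp"}],
    "equations": None,
    "columns": ["span.op"],
}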
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 104543,
"end": 105466
} | class ____(Operation):
def __init__(self, axis=None, *, name=None):
super().__init__(name=name)
self.axis = axis
def call(self, x):
return backend.numpy.flip(x, axis=self.axis)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.flip", "keras.ops.numpy.flip"])
def flip(x, axis=None):
"""Reverse the order of elements in the tensor along the given axis.
The shape of the tensor is preserved, but the elements are reordered.
Args:
x: Input tensor.
axis: Axis or axes along which to flip the tensor. The default,
`axis=None`, will flip over all of the axes of the input tensor.
Returns:
Output tensor with entries of `axis` reversed.
"""
if any_symbolic_tensors((x,)):
return Flip(axis=axis).symbolic_call(x)
return backend.numpy.flip(x, axis=axis)
| Flip |
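A quick usage sketch of the exported op on a concrete array.
import numpy as np
from keras import ops
x = np.array([[1, 2], [3, 4]])
print(ops.flip(x, axis=0))  # [[3, 4], [1, 2]]: rows reversed
print(ops.flip(x))          # [[4, 3], [2, 1]]: every axis reversed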
python | openai__openai-python | src/openai/types/realtime/realtime_mcp_list_tools.py | {
"start": 560,
"end": 889
} | class ____(BaseModel):
server_label: str
"""The label of the MCP server."""
tools: List[Tool]
"""The tools available on the server."""
type: Literal["mcp_list_tools"]
"""The type of the item. Always `mcp_list_tools`."""
id: Optional[str] = None
"""The unique ID of the list."""
| RealtimeMcpListTools |
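A hedged parsing sketch; the payload values are made up, and tools is left empty because Tool's schema is not shown in this span.
item = RealtimeMcpListTools.model_validate(
    {"server_label": "docs", "tools": [], "type": "mcp_list_tools"}
)
print(item.id)  # None: the optional fields default to None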
python | PrefectHQ__prefect | tests/server/utilities/test_schemas.py | {
"start": 5567,
"end": 6373
} | class ____:
@pytest.mark.parametrize("type_", (PlainOwner, ModelOwner))
def test_class_access(self, type_: Union[PlainOwner, ModelOwner]):
assert type_.descr is type_.__dict__["descr"]
def test_base_implementation(self):
instance = PlainOwner()
with pytest.raises(AttributeError):
instance.descr
def test_pydantic_ignored_types(self):
assert "descr" not in ModelOwner.model_fields
assert "descr" in dir(ModelOwner)
assert not ModelOwner.__private_attributes__
@pytest.mark.parametrize("attr_name", ("descr", "_private_descr"))
def test_pydantic_model_access(self, attr_name: str):
instance = ModelOwner()
with pytest.raises(AttributeError):
getattr(instance, attr_name)
| TestPrefectDescriptorBase |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 55762,
"end": 55955
} | class ____(PyConstNode):
# '...' in a subscript list.
value = "Py_Ellipsis"
constant_result = Ellipsis
def compile_time_value(self, denv):
return Ellipsis
| EllipsisNode |
python | django__django | tests/serializers/test_deserialization.py | {
"start": 478,
"end": 4336
} | class ____(SimpleTestCase):
def setUp(self):
self.object_list = [
{"pk": 1, "model": "serializers.author", "fields": {"name": "Jane"}},
{"pk": 2, "model": "serializers.author", "fields": {"name": "Joe"}},
]
self.deserializer = Deserializer(self.object_list)
self.jane = Author(name="Jane", pk=1)
self.joe = Author(name="Joe", pk=2)
def test_deserialized_object_repr(self):
deserial_obj = DeserializedObject(obj=self.jane)
self.assertEqual(
repr(deserial_obj), "<DeserializedObject: serializers.Author(pk=1)>"
)
def test_next_functionality(self):
first_item = next(self.deserializer)
self.assertEqual(first_item.object, self.jane)
second_item = next(self.deserializer)
self.assertEqual(second_item.object, self.joe)
with self.assertRaises(StopIteration):
next(self.deserializer)
def test_invalid_model_identifier(self):
invalid_object_list = [
{"pk": 1, "model": "serializers.author2", "fields": {"name": "Jane"}}
]
self.deserializer = Deserializer(invalid_object_list)
with self.assertRaises(DeserializationError):
next(self.deserializer)
deserializer = Deserializer(object_list=[])
with self.assertRaises(StopIteration):
next(deserializer)
def test_custom_deserializer(self):
class CustomDeserializer(Deserializer):
@staticmethod
def _get_model_from_node(model_identifier):
return Author
deserializer = CustomDeserializer(self.object_list)
result = next(iter(deserializer))
deserialized_object = result.object
self.assertEqual(
self.jane,
deserialized_object,
)
def test_empty_object_list(self):
deserializer = Deserializer(object_list=[])
with self.assertRaises(StopIteration):
next(deserializer)
def test_json_bytes_input(self):
test_string = json.dumps(self.object_list)
stream = test_string.encode("utf-8")
deserializer = JsonDeserializer(stream_or_string=stream)
first_item = next(deserializer)
second_item = next(deserializer)
self.assertEqual(first_item.object, self.jane)
self.assertEqual(second_item.object, self.joe)
def test_jsonl_bytes_input(self):
test_string = """
{"pk": 1, "model": "serializers.author", "fields": {"name": "Jane"}}
{"pk": 2, "model": "serializers.author", "fields": {"name": "Joe"}}
{"pk": 3, "model": "serializers.author", "fields": {"name": "John"}}
{"pk": 4, "model": "serializers.author", "fields": {"name": "Smith"}}"""
stream = test_string.encode("utf-8")
deserializer = JsonlDeserializer(stream_or_string=stream)
first_item = next(deserializer)
second_item = next(deserializer)
self.assertEqual(first_item.object, self.jane)
self.assertEqual(second_item.object, self.joe)
@unittest.skipUnless(HAS_YAML, "No yaml library detected")
def test_yaml_bytes_input(self):
from django.core.serializers.pyyaml import Deserializer as YamlDeserializer
test_string = """- pk: 1
model: serializers.author
fields:
name: Jane
- pk: 2
model: serializers.author
fields:
name: Joe
- pk: 3
model: serializers.author
fields:
name: John
- pk: 4
model: serializers.author
fields:
name: Smith
"""
stream = test_string.encode("utf-8")
deserializer = YamlDeserializer(stream_or_string=stream)
first_item = next(deserializer)
second_item = next(deserializer)
self.assertEqual(first_item.object, self.jane)
self.assertEqual(second_item.object, self.joe)
| TestDeserializer |
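The round trip these tests cover, sketched with Django's public serializers API; Author is the test model used above.
from django.core import serializers
payload = serializers.serialize("json", Author.objects.all())
for deserialized in serializers.deserialize("json", payload):
    deserialized.save()  # each item is a DeserializedObject wrapping an Author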
python | django__django | django/contrib/gis/geos/prototypes/geom.py | {
"start": 913,
"end": 1062
} | class ____(GEOSFuncFactory):
"For GEOS routines that return a geometry."
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
| GeomOutput |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/via_type_of.py | {
"start": 2497,
"end": 3501
} | class ____:
x = ...
y: Any = 0
z: object = []
def test4_alarm1(c: Test4_C):
# always-via-type:unknown
c.x = _test_source()
def test4_alarm2(c: Test4_C):
# always-via-type:Any
c.y = _test_source()
def test4_alarm3(c: Test4_C):
# always-via-type:object
c.z = _test_source()
def return_via_parameter_type(parameter):
return 0
def test_strings():
return return_via_parameter_type("A")
def test_numerals():
return return_via_parameter_type(1)
def test_lists():
return return_via_parameter_type(["a", "b"])
def meta(parameter):
return return_via_parameter_type(parameter)
def test_via_type_of_does_not_propagate():
return meta("Name")
def tito(parameter, other):
pass
def test_tito():
a = tito(_test_source(), [1, 2])
return a
def sink_via_type_of(x, y):
pass
def test_sink(element):
return sink_via_type_of(element, 1)
def test_backwards_tito(parameter):
return tito(parameter, "by_backwards")
| Test4_C |
python | getsentry__sentry | tests/sentry/core/endpoints/test_project_team_details.py | {
"start": 257,
"end": 440
} | class ____(APITestCase):
endpoint = "sentry-api-0-project-team-details"
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
| ProjectTeamDetailsTest |
python | django__django | tests/test_utils/test_transactiontestcase.py | {
"start": 1277,
"end": 1797
} | class ____(TestCase):
available_apps = []
databases = {"default", "other"}
def test_queries_cleared(self):
"""
TransactionTestCase._pre_setup() clears the connections' queries_log
so that it's less likely to overflow. An overflow causes
assertNumQueries() to fail.
"""
for alias in self.databases:
self.assertEqual(
len(connections[alias].queries_log), 0, "Failed for alias %s" % alias
)
| TransactionTestCaseDatabasesTests |
python | huggingface__transformers | src/transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py | {
"start": 32663,
"end": 33679
} | class ____(nn.Module):
"""
This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder.
"""
def __init__(self, conv_encoder, position_embedding):
super().__init__()
self.conv_encoder = conv_encoder
self.position_embedding = position_embedding
def forward(self, pixel_values, pixel_mask):
# send pixel_values and pixel_mask through backbone to get list of (feature_map, pixel_mask) tuples
out = self.conv_encoder(pixel_values, pixel_mask)
pos = []
for feature_map, mask in out:
# position encoding
pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype))
return out, pos
@dataclass
@auto_docstring(
custom_intro="""
Base class for outputs of the MMGroundingDinoEncoder. This class extends BaseModelOutput, due to:
- vision and text last hidden states
- vision and text intermediate hidden states
"""
)
| MMGroundingDinoConvModel |
python | getsentry__sentry | src/sentry/api/serializers/models/userreport.py | {
"start": 811,
"end": 914
} | class ____(UserReportSerializerResponse):
issue: dict[str, Any]
| UserReportWithGroupSerializerResponse |
python | realpython__materials | build-a-django-content-aggregator/source_code_final/podcasts/tests.py | {
"start": 135,
"end": 1504
} | class ____(TestCase):
def setUp(self):
self.episode = Episode.objects.create(
title="My Awesome Podcast Episode",
description="Look mom, I made it!",
pub_date=timezone.now(),
link="https://myawesomeshow.com",
image="https://image.myawesomeshow.com",
podcast_name="My Python Podcast",
guid="de194720-7b4c-49e2-a05f-432436d3fetr",
)
def test_episode_content(self):
self.assertEqual(self.episode.description, "Look mom, I made it!")
self.assertEqual(self.episode.link, "https://myawesomeshow.com")
self.assertEqual(
self.episode.guid, "de194720-7b4c-49e2-a05f-432436d3fetr"
)
def test_episode_str_representation(self):
self.assertEqual(
str(self.episode), "My Python Podcast: My Awesome Podcast Episode"
)
def test_home_page_status_code(self):
response = self.client.get("/")
self.assertEqual(response.status_code, 200)
def test_home_page_uses_correct_template(self):
response = self.client.get(reverse("homepage"))
self.assertTemplateUsed(response, "homepage.html")
def test_homepage_list_contents(self):
response = self.client.get(reverse("homepage"))
self.assertContains(response, "My Awesome Podcast Episode")
| PodCastsTests |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 73573,
"end": 75506
} | class ____(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
exc = self.get_argument("exc")
if exc == "http":
raise HTTPError(410, "no longer here")
elif exc == "zero":
1 / 0
elif exc == "permission":
raise PermissionError("not allowed")
def write_error(self, status_code, **kwargs):
if "exc_info" in kwargs:
typ, value, tb = kwargs["exc_info"]
if isinstance(value, PermissionError):
self.set_status(403)
self.write("PermissionError")
return
RequestHandler.write_error(self, status_code, **kwargs)
def log_exception(self, typ, value, tb):
if isinstance(value, PermissionError):
app_log.warning("custom logging for PermissionError: %s", value.args[0])
else:
RequestHandler.log_exception(self, typ, value, tb)
def test_http_error(self):
# HTTPErrors are logged as warnings with no stack trace.
# TODO: extend ExpectLog to test this more precisely
with ExpectLog(gen_log, ".*no longer here"):
response = self.fetch("/?exc=http")
self.assertEqual(response.code, 410)
def test_unknown_error(self):
# Unknown errors are logged as errors with a stack trace.
with ExpectLog(app_log, "Uncaught exception"):
response = self.fetch("/?exc=zero")
self.assertEqual(response.code, 500)
def test_known_error(self):
# log_exception can override logging behavior, and write_error
# can override the response.
with ExpectLog(app_log, "custom logging for PermissionError: not allowed"):
response = self.fetch("/?exc=permission")
self.assertEqual(response.code, 403)
| ExceptionHandlerTest |
python | kamyu104__LeetCode-Solutions | Python/maximum-subarray-with-equal-products.py | {
"start": 939,
"end": 1380
} | import collections
# PRIME_DIVISORS maps each value to its prime divisors; it is precomputed
# at module level, above this class span (elided here).
class ____(object):
def maxLength(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = 2
lookup = collections.defaultdict(int)
left = 0
for right, x in enumerate(nums):
for p in PRIME_DIVISORS[x]:
left = max(left, lookup[p])
lookup[p] = right+1
result = max(result, right-left+1)
return result
| Solution |