| language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
python | catalyst-team__catalyst | catalyst/contrib/layers/pooling.py | {
"start": 1776,
"end": 2512
} | class ____(nn.Module):
"""@TODO: Docs (add `Example`). Contribution is welcome."""
def __init__(self):
"""Constructor method for the ``GlobalConcatPool2d`` class."""
super().__init__()
self.avg = GlobalAvgPool2d()
self.max = GlobalMaxPool2d()
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward call."""
return torch.cat([self.avg(x), self.max(x)], 1)
@staticmethod
def out_features(in_features):
"""Returns number of channels produced by the pooling.
Args:
in_features: number of channels in the input sample
Returns:
number of output features
"""
return in_features * 2
| GlobalConcatPool2d |
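A minimal usage sketch for the `GlobalConcatPool2d` row above (its own docstring flags a missing example). This assumes the import path matches the file path shown and a standard 4D `(N, C, H, W)` input; whether the pooled tensors keep their trailing 1x1 spatial dims or are flattened, the channel dimension doubles either way.

```python
import torch
from catalyst.contrib.layers.pooling import GlobalConcatPool2d

pool = GlobalConcatPool2d()
x = torch.randn(2, 16, 8, 8)  # batch of 2, 16 channels
y = pool(x)  # global avg-pool and max-pool results concatenated on dim 1
assert y.shape[1] == GlobalConcatPool2d.out_features(16)  # 32 output channels
```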
python | fastai__fastai | fastai/text/models/awdlstm.py | {
"start": 976,
"end": 1339
} | class ____(Module):
"Dropout with probability `p` that is consistent on the seq_len dimension."
def __init__(self, p:float=0.5): self.p=p
def forward(self, x):
if not self.training or self.p == 0.: return x
return x * dropout_mask(x.data, (x.size(0), 1, *x.shape[2:]), self.p)
# %% ../../../nbs/32_text.models.awdlstm.ipynb 13
| RNNDropout |
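A hedged sketch of the `RNNDropout` behavior above. The mask shape `(x.size(0), 1, *x.shape[2:])` broadcasts over dim 1, so under fastai's `(batch, seq_len, n_features)` convention (an assumption here) a feature dropped at one timestep is dropped at every timestep of that sequence.

```python
import torch
from fastai.text.models.awdlstm import RNNDropout

dp = RNNDropout(p=0.5)
dp.train()  # the mask is only applied in training mode
x = torch.randn(4, 10, 8)  # (batch, seq_len, n_features)
y = dp(x)
# Zeros appear in whole columns along dim 1: any timestep zeroed => all zeroed.
assert ((y == 0).any(dim=1) == (y == 0).all(dim=1)).all()
```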
python | langchain-ai__langchain | libs/text-splitters/langchain_text_splitters/html.py | {
"start": 1562,
"end": 11753
} | class ____:
"""Split HTML content into structured Documents based on specified headers.
Splits HTML content by detecting specified header tags and creating hierarchical
`Document` objects that reflect the semantic structure of the original content. For
each identified section, the splitter associates the extracted text with metadata
corresponding to the encountered headers.
If no specified headers are found, the entire content is returned as a single
`Document`. This allows for flexible handling of HTML input, ensuring that
information is organized according to its semantic headers.
The splitter provides the option to return each HTML element as a separate
`Document` or aggregate them into semantically meaningful chunks. It also
gracefully handles multiple levels of nested headers, creating a rich,
hierarchical representation of the content.
Example:
```python
from langchain_text_splitters.html_header_text_splitter import (
HTMLHeaderTextSplitter,
)
# Define headers for splitting on h1 and h2 tags.
headers_to_split_on = [("h1", "Main Topic"), ("h2", "Sub Topic")]
splitter = HTMLHeaderTextSplitter(
headers_to_split_on=headers_to_split_on,
return_each_element=False
)
html_content = \"\"\"
<html>
<body>
<h1>Introduction</h1>
<p>Welcome to the introduction section.</p>
<h2>Background</h2>
<p>Some background details here.</p>
<h1>Conclusion</h1>
<p>Final thoughts.</p>
</body>
</html>
\"\"\"
documents = splitter.split_text(html_content)
# 'documents' now contains Document objects reflecting the hierarchy:
# - Document with metadata={"Main Topic": "Introduction"} and
# content="Introduction"
# - Document with metadata={"Main Topic": "Introduction"} and
# content="Welcome to the introduction section."
# - Document with metadata={"Main Topic": "Introduction",
# "Sub Topic": "Background"} and content="Background"
# - Document with metadata={"Main Topic": "Introduction",
# "Sub Topic": "Background"} and content="Some background details here."
# - Document with metadata={"Main Topic": "Conclusion"} and
# content="Conclusion"
# - Document with metadata={"Main Topic": "Conclusion"} and
# content="Final thoughts."
```
"""
def __init__(
self,
headers_to_split_on: list[tuple[str, str]],
return_each_element: bool = False, # noqa: FBT001,FBT002
) -> None:
"""Initialize with headers to split on.
Args:
headers_to_split_on: A list of `(header_tag,
header_name)` pairs representing the headers that define splitting
boundaries. For example, `[("h1", "Header 1"), ("h2", "Header 2")]`
will split content by `h1` and `h2` tags, assigning their textual
content to the `Document` metadata.
return_each_element: If `True`, every HTML element encountered
(including headers, paragraphs, etc.) is returned as a separate
`Document`. If `False`, content under the same header hierarchy is
aggregated into fewer `Document` objects.
"""
# Sort headers by their numeric level so that h1 < h2 < h3...
self.headers_to_split_on = sorted(
headers_to_split_on, key=lambda x: int(x[0][1:])
)
self.header_mapping = dict(self.headers_to_split_on)
self.header_tags = [tag for tag, _ in self.headers_to_split_on]
self.return_each_element = return_each_element
def split_text(self, text: str) -> list[Document]:
"""Split the given text into a list of `Document` objects.
Args:
text: The HTML text to split.
Returns:
A list of split Document objects. Each `Document` contains
`page_content` holding the extracted text and `metadata` that maps
the header hierarchy to their corresponding titles.
"""
return self.split_text_from_file(StringIO(text))
def split_text_from_url(
self, url: str, timeout: int = 10, **kwargs: Any
) -> list[Document]:
"""Fetch text content from a URL and split it into documents.
Args:
url: The URL to fetch content from.
timeout: Timeout for the request.
**kwargs: Additional keyword arguments for the request.
Returns:
A list of split Document objects. Each `Document` contains
`page_content` holding the extracted text and `metadata` that maps
the header hierarchy to their corresponding titles.
Raises:
requests.RequestException: If the HTTP request fails.
"""
response = requests.get(url, timeout=timeout, **kwargs)
response.raise_for_status()
return self.split_text(response.text)
def split_text_from_file(self, file: str | IO[str]) -> list[Document]:
"""Split HTML content from a file into a list of `Document` objects.
Args:
file: A file path or a file-like object containing HTML content.
Returns:
A list of split Document objects. Each `Document` contains
`page_content` holding the extracted text and `metadata` that maps
the header hierarchy to their corresponding titles.
"""
if isinstance(file, str):
html_content = pathlib.Path(file).read_text(encoding="utf-8")
else:
html_content = file.read()
return list(self._generate_documents(html_content))
def _generate_documents(self, html_content: str) -> Iterator[Document]:
"""Private method that performs a DFS traversal over the DOM and yields.
Document objects on-the-fly. This approach maintains the same splitting
logic (headers vs. non-headers, chunking, etc.) while walking the DOM
explicitly in code.
Args:
html_content: The raw HTML content.
Yields:
Document objects as they are created.
"""
if not _HAS_BS4:
msg = (
"Unable to import BeautifulSoup. Please install via `pip install bs4`."
)
raise ImportError(msg)
soup = BeautifulSoup(html_content, "html.parser")
body = soup.body or soup
# Dictionary of active headers:
# key = user-defined header name (e.g. "Header 1")
# value = tuple of header_text, level, dom_depth
active_headers: dict[str, tuple[str, int, int]] = {}
current_chunk: list[str] = []
def finalize_chunk() -> Document | None:
"""Finalize the accumulated chunk into a single Document."""
if not current_chunk:
return None
final_text = " \n".join(line for line in current_chunk if line.strip())
current_chunk.clear()
if not final_text.strip():
return None
final_meta = {k: v[0] for k, v in active_headers.items()}
return Document(page_content=final_text, metadata=final_meta)
# We'll use a stack for DFS traversal
stack = [body]
while stack:
node = stack.pop()
children = list(node.children)
stack.extend(
child for child in reversed(children) if isinstance(child, Tag)
)
tag = getattr(node, "name", None)
if not tag:
continue
text_elements = [
str(child).strip() for child in _find_all_strings(node, recursive=False)
]
node_text = " ".join(elem for elem in text_elements if elem)
if not node_text:
continue
dom_depth = len(list(node.parents))
# If this node is one of our headers
if tag in self.header_tags:
# If we're aggregating, finalize whatever chunk we had
if not self.return_each_element:
doc = finalize_chunk()
if doc:
yield doc
# Determine numeric level (h1->1, h2->2, etc.)
try:
level = int(tag[1:])
except ValueError:
level = 9999
# Remove any active headers that are at or deeper than this new level
headers_to_remove = [
k for k, (_, lvl, d) in active_headers.items() if lvl >= level
]
for key in headers_to_remove:
del active_headers[key]
# Add/Update the active header
header_name = self.header_mapping[tag]
active_headers[header_name] = (node_text, level, dom_depth)
# Always yield a Document for the header
header_meta = {k: v[0] for k, v in active_headers.items()}
yield Document(page_content=node_text, metadata=header_meta)
else:
headers_out_of_scope = [
k for k, (_, _, d) in active_headers.items() if dom_depth < d
]
for key in headers_out_of_scope:
del active_headers[key]
if self.return_each_element:
# Yield each element's text as its own Document
meta = {k: v[0] for k, v in active_headers.items()}
yield Document(page_content=node_text, metadata=meta)
else:
# Accumulate text in our chunk
current_chunk.append(node_text)
# If we're aggregating and have leftover chunk, yield it
if not self.return_each_element:
doc = finalize_chunk()
if doc:
yield doc
| HTMLHeaderTextSplitter |
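A brief complement to the docstring example in the row above: with `return_each_element=True`, each HTML element becomes its own `Document` even when several share the same header metadata. A small sketch, assuming the splitter is importable from the package's top level:

```python
from langchain_text_splitters import HTMLHeaderTextSplitter

splitter = HTMLHeaderTextSplitter(
    headers_to_split_on=[("h1", "Main Topic")],
    return_each_element=True,  # one Document per element, not per chunk
)
docs = splitter.split_text("<h1>Intro</h1><p>One.</p><p>Two.</p>")
for d in docs:
    print(d.metadata, d.page_content)
# {'Main Topic': 'Intro'} Intro
# {'Main Topic': 'Intro'} One.
# {'Main Topic': 'Intro'} Two.
```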
python | google__pytype | pytype/pytd/serialize_ast.py | {
"start": 414,
"end": 526
} | class ____(Exception):
"""If a dependency can't be restored in the current state."""
| UnrestorableDependencyError |
python | pyca__cryptography | src/cryptography/x509/general_name.py | {
"start": 2125,
"end": 3223
} | class ____(GeneralName):
def __init__(self, value: str) -> None:
if isinstance(value, str):
try:
value.encode("ascii")
except UnicodeEncodeError:
raise ValueError(
"DNSName values should be passed as an A-label string. "
"This means unicode characters should be encoded via "
"a library like idna."
)
else:
raise TypeError("value must be string")
self._value = value
@property
def value(self) -> str:
return self._value
@classmethod
def _init_without_validation(cls, value: str) -> DNSName:
instance = cls.__new__(cls)
instance._value = value
return instance
def __repr__(self) -> str:
return f"<DNSName(value={self.value!r})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, DNSName):
return NotImplemented
return self.value == other.value
def __hash__(self) -> int:
return hash(self.value)
| DNSName |
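A short sketch of the A-label requirement enforced by `DNSName` above: unicode hostnames must be IDNA-encoded to ASCII before construction. The `idna` package used here is one option (an extra dependency, not part of `cryptography` itself).

```python
from cryptography.x509 import DNSName

DNSName("example.com")  # plain ASCII is accepted as-is

try:
    DNSName("bücher.example")  # unicode raises: must be passed as an A-label
except ValueError as exc:
    print(exc)

import idna

DNSName(idna.encode("bücher.example").decode("ascii"))  # "xn--bcher-kva.example"
```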
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 6752,
"end": 6974
} | class ____(BaseModel, extra="forbid"):
type: "BoolIndexType" = Field(..., description="")
on_disk: Optional[bool] = Field(default=None, description="If true, store the index on disk. Default: false.")
| BoolIndexParams |
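A minimal construction sketch for the `BoolIndexParams` model above. The literal `"bool"` for `type` is an assumption based on qdrant's discriminated-union naming; `extra="forbid"` means any unknown field is rejected at validation time.

```python
from qdrant_client.http.models.models import BoolIndexParams

params = BoolIndexParams(type="bool", on_disk=True)  # "bool" literal assumed
print(params)
```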
python | pytorch__pytorch | test/test_utils.py | {
"start": 23278,
"end": 25969
} | class ____(TestCase):
def setUp(self):
super().setUp()
from torch.utils.hipify import hipify_python
self.trie = hipify_python.Trie()
def test_add_and_search_trie(self):
self.trie.add("banana")
self.assertTrue(self.trie.search("banana"))
self.assertFalse(self.trie.search("ban"))
self.assertFalse(self.trie.search("dog"))
def test_add_multiple_and_search_trie(self):
words_to_add = ["banana", "apple", "orange"]
for word in words_to_add:
self.trie.add(word)
for word in words_to_add:
self.assertTrue(self.trie.search(word))
for word in ["ban", "dog", "okay", "app"]:
self.assertFalse(self.trie.search(word))
def test_quote_escape(self):
orig_chars = ["*", "[", ".", "+", "a", "z", "-"]
quoted_strs = ["\\*", "\\[", "\\.", "\\+", "a", "z", "\\-"]
for i in range(len(orig_chars)):
self.assertEqual(self.trie.quote(orig_chars[i]), quoted_strs[i])
def test_export_trie_to_regex(self):
words_to_add = [
"__CUDACC__",
"CUDA_ERROR_CONTEXT_ALREADY_CURRENT",
"CUDA_ERROR_ARRAY_IS_MAPPED",
"CUDA_ERROR_NOT_MAPPED",
"CUDA_ERROR_INVALID_SOURCE",
]
for word in words_to_add:
self.trie.add(word)
regex = self.trie.export_to_regex()
expected_regex = r"(?:CUDA_ERROR_(?:ARRAY_IS_MAPPED|CONTEXT_ALREADY_CURRENT|INVALID_SOURCE|NOT_MAPPED)|__CUDACC__)"
self.assertEqual(regex, expected_regex)
def test_prefix_words_export_trie_to_regex(self):
# Test case where some nodes both have children and are themselves leaf nodes.
words_to_add = ["apple", "app", "ban", "banana"]
for word in words_to_add:
self.trie.add(word)
regex = self.trie.export_to_regex()
expected_regex = r"(?:app(?:le)?|ban(?:ana)?)"
self.assertEqual(regex, expected_regex)
def test_single_export_trie_to_regex(self):
words_to_add = ["cudaErrorInvalidMemcpyDirection"]
for word in words_to_add:
self.trie.add(word)
regex = self.trie.export_to_regex()
expected_regex = "cudaErrorInvalidMemcpyDirection"
self.assertEqual(regex, expected_regex)
def test_char_export_trie_to_regex(self):
self.trie.add("a")
self.assertEqual(self.trie.export_to_regex(), "a")
self.trie.add("b")
self.assertEqual(self.trie.export_to_regex(), "[ab]")
def test_special_char_export_trie_to_regex(self):
self.trie.add(r"c*")
self.assertEqual(self.trie.export_to_regex(), r"c\*")
| TestHipifyTrie |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/executors/ecs/test_boto_schema.py | {
"start": 1123,
"end": 9610
} | class ____:
def test_boto_container_schema_load(self):
schema = BotoContainerSchema()
data = {
"exitCode": 0,
"lastStatus": "STOPPED",
"name": "test_container",
"reason": "Essential container in task exited",
"containerArn": "arn:aws:ecs:us-east-1:123456789012:container/test-cluster/1234567890abcdef0",
}
result = schema.load(data)
assert result["exit_code"] == 0
assert result["last_status"] == "STOPPED"
assert result["name"] == "test_container"
assert result["reason"] == "Essential container in task exited"
assert (
result["container_arn"]
== "arn:aws:ecs:us-east-1:123456789012:container/test-cluster/1234567890abcdef0"
)
def test_boto_container_schema_load_minimal(self):
schema = BotoContainerSchema()
data = {"name": "minimal_container"}
result = schema.load(data)
assert result["name"] == "minimal_container"
assert result.get("exit_code") is None
assert result.get("last_status") is None
assert result.get("reason") is None
assert result.get("container_arn") is None
def test_boto_container_schema_exclude_unknown(self):
schema = BotoContainerSchema()
data = {"name": "test_container", "unknownField": "should_be_ignored"}
result = schema.load(data)
assert "unknownField" not in result
def test_boto_task_schema_load(self):
schema = BotoTaskSchema()
container_data = {
"exitCode": 0,
"lastStatus": "STOPPED",
"name": "test_container",
"reason": "Essential container in task exited",
"containerArn": "arn:aws:ecs:us-east-1:123456789012:container/test-cluster/1234567890abcdef0",
}
data = {
"taskArn": "arn:aws:ecs:us-east-1:123456789012:task/test-cluster/1234567890abcdef0",
"lastStatus": "STOPPED",
"desiredStatus": "STOPPED",
"containers": [container_data],
"startedAt": datetime(2023, 1, 1),
"stoppedReason": "Task failed to start",
}
result = schema.load(data)
assert isinstance(result, EcsExecutorTask)
assert result.task_arn == "arn:aws:ecs:us-east-1:123456789012:task/test-cluster/1234567890abcdef0"
assert result.last_status == "STOPPED"
assert result.desired_status == "STOPPED"
assert len(result.containers) == 1
assert result.containers[0]["name"] == "test_container"
assert result.started_at == datetime(2023, 1, 1)
assert result.stopped_reason == "Task failed to start"
def test_boto_task_schema_load_minimal(self):
schema = BotoTaskSchema()
container_data = {"name": "minimal_container_in_task"}
data = {
"taskArn": "arn:aws:ecs:us-east-1:123456789012:task/test-cluster/1234567890abcdef0",
"lastStatus": "RUNNING",
"desiredStatus": "RUNNING",
"containers": [container_data],
}
result = schema.load(data)
assert isinstance(result, EcsExecutorTask)
assert result.task_arn == "arn:aws:ecs:us-east-1:123456789012:task/test-cluster/1234567890abcdef0"
assert result.last_status == "RUNNING"
assert result.desired_status == "RUNNING"
assert len(result.containers) == 1
assert result.containers[0]["name"] == "minimal_container_in_task"
assert result.started_at is None
assert result.stopped_reason is None
def test_boto_task_schema_exclude_unknown(self):
schema = BotoTaskSchema()
container_data = {"name": "test_container"}
data = {
"taskArn": "arn:aws:ecs:us-east-1:123456789012:task/test-cluster/1234567890abcdef0",
"lastStatus": "RUNNING",
"desiredStatus": "RUNNING",
"containers": [container_data],
"unknownTaskField": "should_be_ignored",
}
result = schema.load(data)
# EcsExecutorTask doesn't store unknown fields, so verifying exclusion directly
# would mean inspecting the raw dict before object creation, e.g. by mocking
# EcsExecutorTask and inspecting its kwargs.
# For now, we just ensure it loads without error and produces the correct type.
assert isinstance(result, EcsExecutorTask)
def test_boto_failure_schema_load(self):
schema = BotoFailureSchema()
data = {
"arn": "arn:aws:ecs:us-east-1:123456789012:container/test-cluster/1234567890abcdef0",
"reason": "MISSING",
}
result = schema.load(data)
assert result["arn"] == "arn:aws:ecs:us-east-1:123456789012:container/test-cluster/1234567890abcdef0"
assert result["reason"] == "MISSING"
def test_boto_failure_schema_load_minimal(self):
schema = BotoFailureSchema()
data = {}
result = schema.load(data)
assert result.get("arn") is None
assert result.get("reason") is None
def test_boto_failure_schema_exclude_unknown(self):
schema = BotoFailureSchema()
data = {"arn": "test_arn", "unknownField": "should_be_ignored"}
result = schema.load(data)
assert "unknownField" not in result
def test_boto_run_task_schema_load(self):
schema = BotoRunTaskSchema()
task_data = {
"taskArn": "arn:aws:ecs:us-east-1:123456789012:task/test-cluster/1234567890abcdef0",
"lastStatus": "RUNNING",
"desiredStatus": "RUNNING",
"containers": [{"name": "test_container"}],
}
failure_data = {
"arn": "arn:aws:ecs:us-east-1:123456789012:container/test-cluster/badabcdef0",
"reason": "MISSING",
}
data = {"tasks": [task_data], "failures": [failure_data]}
result = schema.load(data)
assert len(result["tasks"]) == 1
assert isinstance(result["tasks"][0], EcsExecutorTask)
assert (
result["tasks"][0].task_arn
== "arn:aws:ecs:us-east-1:123456789012:task/test-cluster/1234567890abcdef0"
)
assert len(result["failures"]) == 1
assert (
result["failures"][0]["arn"]
== "arn:aws:ecs:us-east-1:123456789012:container/test-cluster/badabcdef0"
)
assert result["failures"][0]["reason"] == "MISSING"
def test_boto_run_task_schema_exclude_unknown(self):
schema = BotoRunTaskSchema()
data = {
"tasks": [],
"failures": [],
"unknownRunTaskField": "should_be_ignored",
}
result = schema.load(data)
assert "unknownRunTaskField" not in result
def test_boto_describe_tasks_schema_load(self):
schema = BotoDescribeTasksSchema()
task_data = {
"taskArn": "arn:aws:ecs:us-east-1:123456789012:task/test-cluster/1234567890abcdef1",
"lastStatus": "STOPPED",
"desiredStatus": "STOPPED",
"containers": [{"name": "another_container", "exitCode": 1}],
}
failure_data = {
"arn": "arn:aws:ecs:us-east-1:123456789012:container/test-cluster/anotherbad",
"reason": "UNABLE",
}
data = {"tasks": [task_data], "failures": [failure_data]}
result = schema.load(data)
assert len(result["tasks"]) == 1
assert isinstance(result["tasks"][0], EcsExecutorTask)
assert (
result["tasks"][0].task_arn
== "arn:aws:ecs:us-east-1:123456789012:task/test-cluster/1234567890abcdef1"
)
assert result["tasks"][0].containers[0]["exit_code"] == 1
assert len(result["failures"]) == 1
assert (
result["failures"][0]["arn"]
== "arn:aws:ecs:us-east-1:123456789012:container/test-cluster/anotherbad"
)
assert result["failures"][0]["reason"] == "UNABLE"
def test_boto_describe_tasks_schema_exclude_unknown(self):
schema = BotoDescribeTasksSchema()
data = {
"tasks": [],
"failures": [],
"unknownDescribeTasksField": "should_be_ignored",
}
result = schema.load(data)
assert "unknownDescribeTasksField" not in result
| TestBotoSchema |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/streams.py | {
"start": 11926,
"end": 11996
} | class ____(AdsInsights):
breakdowns = ["country"]
| AdsInsightsCountry |
python | django-compressor__django-compressor | compressor/exceptions.py | {
"start": 812,
"end": 950
} | class ____(Exception):
"""
This exception is raised when a template syntax error is encountered.
"""
pass
| TemplateSyntaxError |
python | plotly__plotly.py | plotly/graph_objs/layout/shape/label/_font.py | {
"start": 235,
"end": 9894
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.shape.label"
_path_str = "layout.shape.label.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets the shape label text font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.shape.label.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.shape.label.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.shape.label.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
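A small usage sketch for the generated `Font` class above. Shape labels (and their `font`) assume a reasonably recent plotly; in practice the class is rarely constructed directly, since `fig.add_shape` accepts a plain dict that plotly coerces into `layout.shape.label.Font`.

```python
import plotly.graph_objects as go

fig = go.Figure()
fig.add_shape(
    type="rect", x0=0, x1=1, y0=0, y1=1,
    label=dict(
        text="highlighted region",
        font=dict(family="Courier New, monospace", size=14, color="crimson"),
    ),
)

# Equivalent direct construction of the class defined in this row:
from plotly.graph_objs.layout.shape.label import Font
f = Font(size=14, weight="bold")
```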
python | google__jax | tests/deprecation_test.py | {
"start": 811,
"end": 2886
} | class ____(absltest.TestCase):
@contextlib.contextmanager
def deprecation_context(self, deprecation_id):
deprecations.register(deprecation_id)
try:
yield
finally:
deprecations.unregister(deprecation_id)
def testModuleDeprecation(self):
with test_warning_util.raise_on_warnings():
self.assertEqual(m.x, 42)
with self.assertWarnsRegex(DeprecationWarning, "Please use x"):
self.assertEqual(m.y, 101)
with self.assertRaisesRegex(AttributeError, "Please do not use z"):
_ = m.z
with self.assertRaisesRegex(AttributeError,
"module .* has no attribute 'w'"):
_ = m.w
def testNamedDeprecation(self):
some_unique_id = "some-unique-id"
with self.deprecation_context(some_unique_id):
self.assertFalse(deprecations.is_accelerated(some_unique_id))
deprecations.accelerate(some_unique_id)
self.assertTrue(deprecations.is_accelerated(some_unique_id))
msg = f"deprecation_id={some_unique_id!r} not registered"
with self.assertRaisesRegex(ValueError, msg):
deprecations.accelerate(some_unique_id)
with self.assertRaisesRegex(ValueError, msg):
deprecations.is_accelerated(some_unique_id)
with self.assertRaisesRegex(ValueError, msg):
deprecations.unregister(some_unique_id)
def testNamedDeprecationWarns(self):
deprecation_id = "some-unique-id"
deprecation_message = "This API is deprecated."
with self.deprecation_context(deprecation_id):
self.assertFalse(deprecations.is_accelerated(deprecation_id))
with self.assertWarnsRegex(DeprecationWarning, deprecation_message):
deprecations.warn(deprecation_id, deprecation_message, stacklevel=1)
deprecations.accelerate(deprecation_id)
self.assertTrue(deprecations.is_accelerated(deprecation_id))
with self.assertRaisesRegex(ValueError, deprecation_message):
deprecations.warn(deprecation_id, deprecation_message, stacklevel=1)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| DeprecationTest |
python | PrefectHQ__prefect | tests/test_settings.py | {
"start": 103340,
"end": 109918
} | class ____:
"""Test the PREFECT_CLIENT_CUSTOM_HEADERS setting."""
def test_default_empty_dict(self):
"""Test that custom headers default to empty dict."""
from prefect.settings import get_current_settings
settings = get_current_settings()
assert settings.client.custom_headers == {}
assert isinstance(settings.client.custom_headers, dict)
def test_set_via_temporary_settings(self):
"""Test setting custom headers via temporary_settings."""
from prefect.settings import get_current_settings
custom_headers = {
"X-Test-Header": "test-value",
"Authorization": "Bearer token123",
}
with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: custom_headers}):
settings = get_current_settings()
assert settings.client.custom_headers == custom_headers
def test_json_string_parsing(self, monkeypatch: pytest.MonkeyPatch):
"""Test that JSON string values are parsed correctly."""
json_value = '{"X-Json-Header": "json-value", "Api-Key": "secret123"}'
monkeypatch.setenv("PREFECT_CLIENT_CUSTOM_HEADERS", json_value)
# Create a new settings instance to pick up the env var
from prefect.settings.models.root import Settings
settings = Settings()
expected = {"X-Json-Header": "json-value", "Api-Key": "secret123"}
assert settings.client.custom_headers == expected
def test_invalid_json_string_raises_error(self, monkeypatch: pytest.MonkeyPatch):
"""Test that invalid JSON raises appropriate error."""
monkeypatch.setenv("PREFECT_CLIENT_CUSTOM_HEADERS", "not-valid-json")
from pydantic_settings.exceptions import SettingsError
with pytest.raises(SettingsError):
from prefect.settings.models.root import Settings
Settings()
def test_non_string_values_raise_error(self):
"""Test that non-string header values raise validation error."""
import pydantic
invalid_headers = {
"X-Test-Header": 123, # Integer instead of string
"X-Another": "valid",
}
with pytest.raises(pydantic.ValidationError):
with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: invalid_headers}):
from prefect.settings import get_current_settings
get_current_settings()
def test_non_string_keys_raise_error(self):
"""Test that non-string header keys raise validation error."""
import pydantic
invalid_headers = {
123: "value", # Integer key instead of string
"valid-key": "valid-value",
}
with pytest.raises(pydantic.ValidationError):
with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: invalid_headers}):
from prefect.settings import get_current_settings
get_current_settings()
def test_empty_dict_valid(self):
"""Test that empty dict is valid."""
from prefect.settings import get_current_settings
with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: {}}):
settings = get_current_settings()
assert settings.client.custom_headers == {}
def test_unicode_headers_supported(self):
"""Test that unicode values are supported."""
from prefect.settings import get_current_settings
unicode_headers = {
"X-Unicode-Test": "value with émojis 🚀",
"X-Chinese": "中文测试",
}
with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: unicode_headers}):
settings = get_current_settings()
assert settings.client.custom_headers == unicode_headers
def test_case_sensitivity_preserved(self):
"""Test that header name case is preserved."""
from prefect.settings import get_current_settings
mixed_case_headers = {
"X-CamelCase-Header": "value1",
"lowercase-header": "value2",
"UPPERCASE-HEADER": "value3",
}
with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: mixed_case_headers}):
settings = get_current_settings()
assert settings.client.custom_headers == mixed_case_headers
def test_value_as_environment_variable_json_serializable(self):
from prefect.settings import get_current_settings
custom_headers = {"X-Test-Header": "test-value"}
with temporary_settings({PREFECT_CLIENT_CUSTOM_HEADERS: custom_headers}):
settings = get_current_settings()
env_value = settings.to_environment_variables()[
"PREFECT_CLIENT_CUSTOM_HEADERS"
]
assert json.loads(env_value) == custom_headers
def test_setting_via_cli_string(
self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
):
"""Test setting custom headers via CLI/profile with a JSON string."""
from prefect.settings.models.root import Settings
# Clear test mode to ensure profile loading
monkeypatch.delenv("PREFECT_TESTING_TEST_MODE", raising=False)
monkeypatch.delenv("PREFECT_TESTING_UNIT_TEST_MODE", raising=False)
json_string = (
'{"X-Test-Header": "test-value", "Authorization": "Bearer token123"}'
)
# Use a temporary profiles file for isolation
profiles_path = tmp_path / "profiles.toml"
monkeypatch.setenv("PREFECT_PROFILES_PATH", str(profiles_path))
# Write a profile with the JSON string value, simulating what `prefect config set` does
profiles_path.write_text(f"""
active = "test"
[profiles.test]
PREFECT_CLIENT_CUSTOM_HEADERS = '{json_string}'
""")
# Create a new settings instance that will load from the profiles file
settings = Settings()
expected = {"X-Test-Header": "test-value", "Authorization": "Bearer token123"}
assert settings.client.custom_headers == expected
def test_prefect_custom_sources_satisfy_pydantic_warning_check() -> None:
class DummySettings(PrefectBaseSettings):
model_config = build_settings_config()
sources = (
PrefectTomlConfigSettingsSource(DummySettings),
PyprojectTomlConfigSettingsSource(DummySettings),
)
with warnings.catch_warnings(record=True) as caught:
warnings.simplefilter("always")
PrefectBaseSettings._settings_warn_unused_config_keys(
sources,
DummySettings.model_config,
)
assert not caught
| TestClientCustomHeadersSetting |
python | streamlit__streamlit | lib/tests/streamlit/file_uploader_utils_test.py | {
"start": 1918,
"end": 3923
} | class ____(unittest.TestCase):
@parameterized.expand(
[
# Valid cases
("valid_single_extension_pdf", "document.pdf", [".pdf", ".png"], True),
("valid_single_extension_png", "image.png", [".pdf", ".png"], True),
("case_insensitive", "image.png", [".PDF", ".PNG"], True),
("valid_multi_part_tar_gz", "archive.tar.gz", [".tar.gz", ".zip"], True),
("valid_multi_part_zip", "data.zip", [".tar.gz", ".zip"], True),
("valid_tar_gz_allowed_gz", "archive.tar.gz", [".gz"], True),
(
"valid_multiple_periods",
"my.file.tar.gz",
[".tar.gz", ".pdf"],
True,
),
("extension_is_uppercase", "document.CSV", [".csv"], True),
# On non-Windows, a colon is a valid filename character.
("colon_in_filename_valid", "my:file.txt", [".txt"], True),
("colon_in_filename_invalid", "my:file.txt", [".log"], False),
# Invalid cases
("invalid_single_extension", "document.docx", [".pdf", ".png"], False),
(
"invalid_multi_part_extension",
"archive.tar.bz2",
[".tar.gz", ".zip"],
False,
),
("no_extension", "file_without_extension", [".pdf", ".png"], False),
("empty_filename", "", [".pdf", ".tar.gz"], False),
("filename_is_period", ".", [".pdf", ".tar.gz"], False),
# Null byte injection
("null_byte_injection", "file.txt\0.pdf", [".pdf"], False),
]
)
def test_filename_valid(self, _, filename, allowed_types, expected_valid):
"""Test whether filenames are valid against allowed extensions."""
actual_valid = is_filename_valid(filename, allowed_types)
assert actual_valid == expected_valid
@mock.patch("streamlit.elements.lib.file_uploader_utils.os.name", "nt")
| EnforceFilenameRestrictionTest |
python | apache__airflow | providers/fab/tests/unit/fab/auth_manager/views/test_roles_list.py | {
"start": 2265,
"end": 2527
} | class ____:
def test_role_model_view(self, client_roles_reader, recwarn):
resp = client_roles_reader.get("/roles/list/", follow_redirects=True)
_assert_dataset_deprecation_warning(recwarn)
assert resp.status_code == 200
| TestRolesListView |
python | bokeh__bokeh | src/bokeh/protocol/messages/server_info_reply.py | {
"start": 1547,
"end": 1609
} | class ____(TypedDict):
bokeh: str
server: str
| VersionInfo |
python | getsentry__sentry | src/sentry/seer/similarity/types.py | {
"start": 1202,
"end": 1400
} | class ____(TypedDict):
responses: list[RawSeerSimilarIssueData]
# Like the data that comes back from seer, but guaranteed to have an existing parent hash
@dataclass
| SimilarIssuesEmbeddingsResponse |
python | apache__airflow | providers/slack/src/airflow/providers/slack/operators/slack_webhook.py | {
"start": 1198,
"end": 5074
} | class ____(BaseOperator):
"""
This operator allows you to post messages to Slack using Incoming Webhooks.
.. note::
You cannot override the default channel (chosen by the user who installed your app),
username, or icon when you're using Incoming Webhooks to post messages.
Instead, these values will always inherit from the associated Slack App configuration
(`link <https://api.slack.com/messaging/webhooks#advanced_message_formatting>`_).
These values can be changed only in a `Legacy Slack Integration Incoming Webhook
<https://api.slack.com/legacy/custom-integrations/messaging/webhooks#legacy-customizations>`_.
:param slack_webhook_conn_id: :ref:`Slack Incoming Webhook <howto/connection:slack>`
connection id that has Incoming Webhook token in the password field.
:param message: The formatted text of the message to be published.
If ``blocks`` are included, this will become the fallback text used in notifications.
:param attachments: The attachments to send on Slack. Should be a list of
dictionaries representing Slack attachments.
:param blocks: The blocks to send on Slack. Should be a list of
dictionaries representing Slack blocks.
:param channel: The channel the message should be posted to
:param username: The username to post to slack with
:param icon_emoji: The emoji to use as icon for the user posting to Slack
:param icon_url: The icon image URL string to use in place of the default icon.
:param proxy: Proxy to make the Slack Incoming Webhook call. Optional
:param timeout: The maximum number of seconds the client will wait to connect
and receive a response from Slack. Optional
:param retry_handlers: List of handlers to customize retry logic in ``slack_sdk.WebhookClient``. Optional
"""
template_fields: Sequence[str] = (
"message",
"attachments",
"blocks",
"channel",
"username",
"proxy",
)
def __init__(
self,
*,
slack_webhook_conn_id,
message: str = "",
attachments: list | None = None,
blocks: list | None = None,
channel: str | None = None,
username: str | None = None,
icon_emoji: str | None = None,
icon_url: str | None = None,
proxy: str | None = None,
timeout: int | None = None,
retry_handlers: list[RetryHandler] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.slack_webhook_conn_id = slack_webhook_conn_id
self.proxy = proxy
self.message = message
self.attachments = attachments
self.blocks = blocks
self.channel = channel
self.username = username
self.icon_emoji = icon_emoji
self.icon_url = icon_url
self.timeout = timeout
self.retry_handlers = retry_handlers
@cached_property
def hook(self) -> SlackWebhookHook:
"""Create and return an SlackWebhookHook (cached)."""
return SlackWebhookHook(
slack_webhook_conn_id=self.slack_webhook_conn_id,
proxy=self.proxy,
timeout=self.timeout,
retry_handlers=self.retry_handlers,
)
def execute(self, context: Context) -> None:
"""Call the SlackWebhookHook to post the provided Slack message."""
self.hook.send(
text=self.message,
attachments=self.attachments,
blocks=self.blocks,
# Parameters below are kept for compatibility with previous versions of the operator and warn the user if set
# Legacy Integration Parameters
channel=self.channel,
username=self.username,
icon_emoji=self.icon_emoji,
icon_url=self.icon_url,
)
| SlackWebhookOperator |
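A hedged usage sketch for `SlackWebhookOperator` above, as it might appear inside a DAG. `"slack_default"` is a placeholder connection id and must point at a Slack Incoming Webhook connection; note that `message` is templated per `template_fields`.

```python
from airflow.providers.slack.operators.slack_webhook import SlackWebhookOperator

notify = SlackWebhookOperator(
    task_id="notify_slack",
    slack_webhook_conn_id="slack_default",  # placeholder conn id (assumption)
    message="Pipeline finished for {{ ds }}",  # rendered by Jinja templating
    blocks=[{"type": "section", "text": {"type": "mrkdwn", "text": "*Done*"}}],
)
```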
python | weaviate__weaviate-python-client | weaviate/collections/classes/internal.py | {
"start": 3950,
"end": 4192
} | class ____:
"""The generative data returned relevant to a single prompt generative query."""
debug: Optional[generative_pb2.GenerativeDebug]
metadata: Optional[GenerativeMetadata]
text: Optional[str]
@dataclass
| GenerativeSingle |
python | pytorch__pytorch | test/inductor/test_fp8.py | {
"start": 15632,
"end": 54737
} | class ____(TestCase):
@unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg)
@parametrize("dtype", (torch.bfloat16, torch.float32))
@parametrize("shape", ("16,16,32", "16,32,32", "1024,1024,512"))
@parametrize("has_bias", (False, True))
@parametrize("use_fast_accum", (False, True))
@parametrize(
"persistent_matmul", [False, True] if has_triton_tma_device() else [False]
)
def test_tensorwise_scaling(
self,
dtype: torch.dtype,
shape: str,
has_bias: bool,
use_fast_accum: bool,
persistent_matmul: bool,
):
if dtype is torch.float32 and has_bias:
self.skipTest("bias is not supported when output dtype is float32")
device = "cuda"
dtype_float8 = torch.float8_e4m3fn
dtype_float8 = _fix_fp8_dtype_for_rocm(dtype_float8, device)
shape = [int(dim) for dim in shape.split(",")]
M, K, N = shape # Matmul Y = X [M, K] x W [N, K]
# input and output dtypes of _scaled_mm do not need to be the same, but
# typically in a model they are
x = torch.randn(M, K, dtype=dtype, device=device)
w = torch.randn(N, K, dtype=dtype, device=device)
bias = None
if has_bias:
bias = torch.randn(N, device=device, dtype=torch.bfloat16)
# quantize weight (prior to inference)
w_fp8, w_inverse_scale = _quantize_tensorwise(w, dtype_float8)
w_t_fp8 = w_fp8.t()
# quantize input x
x_fp8, x_inverse_scale = _quantize_tensorwise(x, dtype_float8)
def linear(x_fp8, x_inverse_scale, w_t_fp8, w_inverse_scale, bias):
y = torch._scaled_mm(
x_fp8,
w_t_fp8,
x_inverse_scale,
w_inverse_scale,
bias,
out_dtype=dtype,
use_fast_accum=use_fast_accum,
)
return y
y_eager = linear(
x_fp8,
x_inverse_scale,
w_t_fp8,
w_inverse_scale,
bias,
)
with config.patch({"triton.enable_persistent_tma_matmul": persistent_matmul}):
linear_compiled = torch.compile(
linear, backend="inductor", mode="max-autotune"
)
y_compiled = linear_compiled(
x_fp8,
x_inverse_scale,
w_t_fp8,
w_inverse_scale,
bias,
)
self.assertEqual(y_eager.dtype, dtype)
self.assertEqual(y_compiled.dtype, dtype)
# depending on the kernel config (BLOCK_M size, etc) selected during Inductor
# autotuning for the compiled case, the results can be different because of
# the way blocks of results are accumulated (float addition not associative), so
# setting a small absolute tolerance in these tests
if dtype == torch.bfloat16:
self.assertEqual(y_eager, y_compiled, rtol=5e-2, atol=0.07)
else:
self.assertEqual(y_eager, y_compiled, rtol=1e-2, atol=0.05)
@unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg)
def test_scaled_mm_preserves_strides(self):
"""Test that scaled_mm preserves stride ordering through a custom pass."""
GPU_TYPE = "cuda"
def f(a, b, scale_a, scale_b):
# Convert to fp8 with correct strides for scaled_mm
dtype_float8 = torch.float8_e4m3fn
dtype_float8 = _fix_fp8_dtype_for_rocm(dtype_float8, GPU_TYPE)
a_fp8 = a.to(dtype_float8).contiguous() # row-major
b_fp8 = b.t().contiguous().t().to(dtype_float8) # column-major
return torch._scaled_mm(
a_fp8, b_fp8, scale_a, scale_b, out_dtype=torch.bfloat16
)
class ScaledMMStridePass(PatternMatcherPass):
def __init__(self) -> None:
super().__init__()
self.called = False
def __call__(self, g: torch.fx.Graph):
# Directly manipulate the graph without using pattern matching
for node in g.nodes:
if (
node.op == "call_function"
and node.target == torch.ops.aten._scaled_mm.default
):
# Insert clone operations before scaled_mm
with g.inserting_before(node):
a_fp8, b_fp8 = node.args[0], node.args[1]
# Clone the inputs to potentially change stride ordering
a_cloned = g.call_function(
torch.ops.aten.clone,
(a_fp8,),
{"memory_format": torch.contiguous_format},
)
b_cloned = g.call_function(
torch.ops.aten.clone,
(b_fp8,),
{"memory_format": torch.contiguous_format},
)
# Replace the arguments in the scaled_mm call
node.args = (a_cloned, b_cloned) + node.args[2:]
self.called = True
g.lint()
return g
stride_pass = ScaledMMStridePass()
# Create inputs with correct strides for scaled_mm
a = torch.randn((64, 128), dtype=torch.bfloat16, device=GPU_TYPE)
b = torch.randn((128, 64), dtype=torch.bfloat16, device=GPU_TYPE)
scale_a = torch.tensor(1.0, device=GPU_TYPE)
scale_b = torch.tensor(1.0, device=GPU_TYPE)
# First, verify that f works without the pass (baseline)
expected = f(a, b, scale_a, scale_b)
from torch._inductor import config
with config.patch(post_grad_custom_post_pass=stride_pass):
f_compiled = torch.compile(f, dynamic=False)
result = f_compiled(a, b, scale_a, scale_b)
# Verify the pattern was called
self.assertTrue(stride_pass.called, "Stride ordering pass was not called")
# Verify correctness - the pass should preserve correctness
# even though it modified strides
self.assertEqual(expected, result, atol=1e-2, rtol=1e-2)
# Verify the generated code contains the clones inserted by our pass
_, (wrapper,) = run_and_get_code(f_compiled, a, b, scale_a, scale_b)
self.assertIn("scaled_mm", wrapper.lower())
# The clones should be visible in the generated code
self.assertIn("clone", wrapper.lower())
@unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg)
@unittest.skipIf(
not has_triton_tma_device(), "Need device-side TMA support in Triton"
)
@parametrize("dtype", (torch.bfloat16, torch.float32))
@parametrize("shape", ("16,32,32", "1024,1024,512"))
@parametrize("use_fast_accum", (False, True))
def test_tensorwise_scaling_tma_template(
self,
dtype: torch.dtype,
shape: str,
use_fast_accum: bool,
):
device = "cuda"
dtype_float8 = torch.float8_e4m3fn
dtype_float8 = _fix_fp8_dtype_for_rocm(dtype_float8, device)
shape = [int(dim) for dim in shape.split(",")]
M, K, N = shape # Matmul Y = X [M, K] x W [N, K]
# input and output dtypes of _scaled_mm do not need to be the same, but
# typically in a model they are
x = torch.randn(M, K, dtype=dtype, device=device)
w = torch.randn(N, K, dtype=dtype, device=device)
bias = None
# quantize weight (prior to inference)
w_fp8, w_inverse_scale = _quantize_tensorwise(w, dtype_float8)
w_t_fp8 = w_fp8.t()
# quantize input x
x_fp8, x_inverse_scale = _quantize_tensorwise(x, dtype_float8)
def linear(x_fp8, x_inverse_scale, w_t_fp8, w_inverse_scale, bias):
y = torch._scaled_mm(
x_fp8,
w_t_fp8,
x_inverse_scale,
w_inverse_scale,
bias,
out_dtype=dtype,
use_fast_accum=use_fast_accum,
)
return y
y_eager = linear(
x_fp8,
x_inverse_scale,
w_t_fp8,
w_inverse_scale,
bias,
)
with config.patch(
{
"triton.enable_persistent_tma_matmul": True,
"test_configs.autotune_choice_name_regex": "triton_scaled_mm_device_tma",
"max_autotune_gemm_backends": "TRITON",
"max_autotune": True,
}
):
linear_compiled = torch.compile(
linear, backend="inductor", mode="max-autotune"
)
y_compiled, code = run_and_get_code(
linear_compiled,
x_fp8,
x_inverse_scale,
w_t_fp8,
w_inverse_scale,
bias,
)
FileCheck().check(
f"SCALE_RECIPE_A : tl.constexpr = {ScalingType.TensorWise.value}"
).run(code[0])
FileCheck().check(
f"SCALE_RECIPE_B : tl.constexpr = {ScalingType.TensorWise.value}"
).run(code[0])
self.assertEqual(y_eager.dtype, dtype)
self.assertEqual(y_compiled.dtype, dtype)
# depending on the kernel config (BLOCK_M size, etc) selected during Inductor
# autotuning for the compiled case, the results can be different because of
# the way blocks of results are accumulated (float addition not associative), so
# setting a small absolute tolerance in these tests
torch.testing.assert_close(y_eager, y_compiled, rtol=1e-2, atol=0.05)
@unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg)
@parametrize("shape", ("16,16,32", "16,32,32", "1024,1024,512"))
@parametrize("has_bias", (False, True))
@parametrize("use_fast_accum", (False, True))
@parametrize(
"persistent_matmul", [False, True] if has_triton_tma_device() else [False]
)
def test_rowwise_scaling(
self, shape: str, has_bias: bool, use_fast_accum: bool, persistent_matmul: bool
):
# Only bf16 output type is supported for row-wise scaling, not fp32
dtype: torch.dtype = torch.bfloat16
device = "cuda"
dtype_float8 = torch.float8_e4m3fn
dtype_float8 = _fix_fp8_dtype_for_rocm(dtype_float8, device)
shape = [int(dim) for dim in shape.split(",")]
M, K, N = shape # Matmul Y = X [M, K] x W [N, K]
x = torch.randn(M, K, dtype=dtype, device=device)
w = torch.randn(N, K, dtype=dtype, device=device)
bias = None
if has_bias:
bias = torch.randn(N, device=device, dtype=torch.bfloat16)
# quantize weight (prior to inference)
w_fp8, w_inverse_scale = _quantize_rowwise(w, dtype_float8)
w_t_fp8 = w_fp8.t()
w_inverse_scale = w_inverse_scale.t() # scale_b should be (1, N)
# quantize input x
x_fp8, x_inverse_scale = _quantize_rowwise(x, dtype_float8)
def linear(x_fp8, x_inverse_scale, w_t_fp8, w_inverse_scale, bias):
y = torch._scaled_mm(
x_fp8,
w_t_fp8,
x_inverse_scale,
w_inverse_scale,
bias,
out_dtype=dtype,
use_fast_accum=use_fast_accum,
)
return y
y_eager = linear(
x_fp8,
x_inverse_scale,
w_t_fp8,
w_inverse_scale,
bias,
)
with config.patch({"triton.enable_persistent_tma_matmul": persistent_matmul}):
linear_compiled = torch.compile(
linear, backend="inductor", mode="max-autotune"
)
y_compiled = linear_compiled(
x_fp8,
x_inverse_scale,
w_t_fp8,
w_inverse_scale,
bias,
)
self.assertEqual(y_eager.dtype, dtype)
self.assertEqual(y_compiled.dtype, dtype)
torch.testing.assert_close(y_eager, y_compiled, rtol=5e-2, atol=0.07)
@unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg)
@unittest.skipIf(
not has_triton_tma_device(), "Need device-side TMA support in Triton"
)
@parametrize("shape", ("16,32,32", "1024,1024,512"))
@parametrize("use_fast_accum", (False, True))
def test_rowwise_scaling_tma_template(
self,
shape: str,
use_fast_accum: bool,
):
# Only bf16 output type is supported for row-wise scaling, not fp32
dtype: torch.dtype = torch.bfloat16
device = "cuda"
dtype_float8 = torch.float8_e4m3fn
dtype_float8 = _fix_fp8_dtype_for_rocm(dtype_float8, device)
shape = [int(dim) for dim in shape.split(",")]
M, K, N = shape # Matmul Y = X [M, K] x W [N, K]
x = torch.randn(M, K, dtype=dtype, device=device)
w = torch.randn(N, K, dtype=dtype, device=device)
bias = None
# quantize weight (prior to inference)
w_fp8, w_inverse_scale = _quantize_rowwise(w, dtype_float8)
w_t_fp8 = w_fp8.t()
w_inverse_scale = w_inverse_scale.t() # scale_b should be (1, N)
# quantize input x
x_fp8, x_inverse_scale = _quantize_rowwise(x, dtype_float8)
def linear(x_fp8, x_inverse_scale, w_t_fp8, w_inverse_scale, bias):
y = torch._scaled_mm(
x_fp8,
w_t_fp8,
x_inverse_scale,
w_inverse_scale,
bias,
out_dtype=dtype,
use_fast_accum=use_fast_accum,
)
return y
y_eager = linear(
x_fp8,
x_inverse_scale,
w_t_fp8,
w_inverse_scale,
bias,
)
with config.patch(
{
"triton.enable_persistent_tma_matmul": True,
"test_configs.autotune_choice_name_regex": "triton_scaled_mm_device_tma",
"max_autotune_gemm_backends": "TRITON",
"max_autotune": True,
}
):
linear_compiled = torch.compile(
linear, backend="inductor", mode="max-autotune"
)
y_compiled, code = run_and_get_code(
linear_compiled,
x_fp8,
x_inverse_scale,
w_t_fp8,
w_inverse_scale,
bias,
)
FileCheck().check(
f"SCALE_RECIPE_A : tl.constexpr = {ScalingType.RowWise.value}"
).run(code[0])
FileCheck().check(
f"SCALE_RECIPE_B : tl.constexpr = {ScalingType.RowWise.value}"
).run(code[0])
self.assertEqual(y_eager.dtype, dtype)
self.assertEqual(y_compiled.dtype, dtype)
torch.testing.assert_close(y_eager, y_compiled, rtol=1e-2, atol=0.05)
@unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg)
@unittest.skipIf(
not has_triton_tma_device(), "Need device-side TMA support in Triton"
)
@unittest.skipIf(
_get_torch_cuda_version() < (12, 9),
"cuBLAS blockwise scaling added in CUDA 12.9",
)
@parametrize("shape", ((16, 256, 256), (1024, 512, 1024)))
@parametrize("use_fast_accum", (False, True))
@parametrize(
"scaling_block_sizes", ((1, 128, 128, 128), (1, 128, 1, 128))
) # (BlockWise1x128, BlockWise128x128), (BlockWise1x128, BlockWise1x128)
def test_main_loop_scaling(
self,
shape: tuple[int, int, int],
use_fast_accum: bool,
scaling_block_sizes: tuple[int, int, int, int],
):
# Only bf16 output type is supported for non-tensorwise scaling, not fp32
dtype: torch.dtype = torch.bfloat16
device = "cuda"
dtype_float8 = torch.float8_e4m3fn
dtype_float8 = _fix_fp8_dtype_for_rocm(dtype_float8, device)
M, N, K = shape # Matmul Y = X [M, K] x W [N, K]
x = torch.randn(M, K, dtype=dtype, device=device)
w = torch.randn(N, K, dtype=dtype, device=device)
bias = None
am, ak, bn, bk = scaling_block_sizes
# quantize weight (prior to inference)
w_fp8, w_inverse_scale = _quantize_blockwise(
w, dtype_float8, block_outer=bn, block_inner=bk
)
w_t_fp8 = w_fp8.t()
if (bn, bk) == (1, 128):
w_inverse_scale = (
w_inverse_scale.t().contiguous().t().t()
) # 1x128 blocks need scales to be outer-dim-major
else:
w_inverse_scale = w_inverse_scale.t() # scale_b should be (1, N)
# quantize input x
x_fp8, x_inverse_scale = _quantize_blockwise(
x, dtype_float8, block_outer=am, block_inner=ak
)
if (am, ak) == (1, 128):
x_inverse_scale = (
x_inverse_scale.t().contiguous().t()
) # 1x128 blocks need scales to be outer-dim-major
def linear(x_fp8, x_inverse_scale, w_t_fp8, w_inverse_scale, bias):
y = torch._scaled_mm(
x_fp8,
w_t_fp8,
x_inverse_scale,
w_inverse_scale,
bias,
out_dtype=dtype,
use_fast_accum=use_fast_accum,
)
return y
y_eager = linear(
x_fp8,
x_inverse_scale,
w_t_fp8,
w_inverse_scale,
bias,
)
with config.patch(
{
"triton.enable_persistent_tma_matmul": True,
"test_configs.autotune_choice_name_regex": "triton_scaled_mm_device_tma",
"max_autotune_gemm_backends": "TRITON",
"max_autotune": True,
}
):
linear_compiled = torch.compile(
linear, backend="inductor", mode="max-autotune"
)
y_compiled, code = run_and_get_code(
linear_compiled,
x_fp8,
x_inverse_scale,
w_t_fp8,
w_inverse_scale,
bias,
)
# Verify that Inductor chooses the correct scaling recipes
FileCheck().check(
f"SCALE_RECIPE_A : tl.constexpr = {ScalingType.BlockWise1x128.value}"
).run(code[0])
if (bn, bk) == (1, 128):
check_scale_recipe_b = ScalingType.BlockWise1x128.value
else:
check_scale_recipe_b = ScalingType.BlockWise128x128.value
FileCheck().check(
f"SCALE_RECIPE_B : tl.constexpr = {check_scale_recipe_b}"
).run(code[0])
self.assertEqual(y_eager.dtype, dtype)
self.assertEqual(y_compiled.dtype, dtype)
torch.testing.assert_close(y_eager, y_compiled, rtol=1e-2, atol=0.05)
@unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg)
@parametrize("M", (1, 3, 33, 257, 1024))
@parametrize("K", (16, 32, 1024))
@parametrize("N", (16, 2048))
@parametrize(
"persistent_matmul", [False, True] if has_triton_tma_device() else [False]
)
def test_tensorwise_scaling_acceptable_input_dims(
self, M: int, K: int, N: int, persistent_matmul: bool
):
# alignment requirements: K and N divisible by 16
dtype: torch.dtype = torch.bfloat16
use_fast_accum = True
device = "cuda"
dtype_float8 = torch.float8_e4m3fn
dtype_float8 = _fix_fp8_dtype_for_rocm(dtype_float8, device)
x = torch.randn(M, K, dtype=dtype, device=device)
w = torch.randn(N, K, dtype=dtype, device=device)
bias = None
w_fp8, w_inverse_scale = _quantize_tensorwise(w, dtype_float8)
w_t_fp8 = w_fp8.t()
x_fp8, x_inverse_scale = _quantize_tensorwise(x, dtype_float8)
def linear(x_fp8, x_inverse_scale, w_t_fp8, w_inverse_scale, bias):
y = torch._scaled_mm(
x_fp8,
w_t_fp8,
x_inverse_scale,
w_inverse_scale,
bias,
out_dtype=dtype,
use_fast_accum=use_fast_accum,
)
return y
y_eager = linear(
x_fp8,
x_inverse_scale,
w_t_fp8,
w_inverse_scale,
bias,
)
with config.patch({"triton.enable_persistent_tma_matmul": persistent_matmul}):
linear_compiled = torch.compile(
linear, backend="inductor", mode="max-autotune"
)
y_compiled = linear_compiled(
x_fp8,
x_inverse_scale,
w_t_fp8,
w_inverse_scale,
bias,
)
self.assertEqual(y_eager.dtype, dtype)
self.assertEqual(y_compiled.dtype, dtype)
torch.testing.assert_close(y_eager, y_compiled, rtol=5e-2, atol=0.07)
@unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg)
@torch._inductor.config.patch("emulate_precision_casts", True)
def test_mx_fusion(self):
# Register fake_scaled_mm custom op scoped to this test
with torch.library._scoped_library("test_fp8", "FRAGMENT") as lib:
# Define the op schema
lib.define(
"fake_scaled_mm(Tensor mat_a, Tensor mat_b, Tensor scale_a, Tensor scale_b, "
"Tensor? bias=None, Tensor? scale_result=None, ScalarType? out_dtype=None, "
"bool use_fast_accum=False) -> Tensor"
)
input_values = []
# Register CUDA implementation
@torch.library.impl(lib, "fake_scaled_mm", "CUDA")
def fake_scaled_mm_impl(
mat_a,
mat_b,
scale_a,
scale_b,
bias=None,
scale_result=None,
out_dtype=None,
use_fast_accum=False,
):
"""Software-emulated scaled_mm for testing without CUDA 12.8"""
out_dtype = out_dtype or torch.bfloat16
# just using add, because without real dtypes,
# was seeing overflow/instability
nonlocal input_values
input_values.append((mat_a, mat_b, scale_a, scale_b))
result = mat_a.to(torch.float32) + mat_b.to(torch.float32)
if bias is not None:
result = result + bias.to(torch.float32)
return result.to(out_dtype)
# Register fake implementation
@torch.library.impl(lib, "fake_scaled_mm", "Meta")
def fake_scaled_mm_meta(
mat_a,
mat_b,
scale_a,
scale_b,
bias=None,
scale_result=None,
out_dtype=None,
use_fast_accum=False,
):
"""FakeTensor implementation"""
out_dtype = out_dtype or torch.bfloat16
M, K = mat_a.shape
K2, N = mat_b.shape
torch._check(
K == K2,
lambda: f"Incompatible shapes: {mat_a.shape} @ {mat_b.shape}",
)
return torch.empty((M, N), dtype=out_dtype, device=mat_a.device)
def forward(
arg0_1,
arg1_1,
):
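                # This hand-traced graph reproduces MX (microscaling) fp8
                # quantization. For each 32-element block it takes the amax and
                # extracts a power-of-two scale as an e8m0 value with bit tricks
                # on the float32 representation: `>> 23` isolates the exponent
                # bits, `- 127` removes the IEEE bias, `- 8` accounts for
                # e4m3's largest exponent, the clamps keep the e8m0 range, and
                # NaN maps to 255. Shifting back (`<< 23`) and bitcasting to
                # float32 recovers the scale, which divides each block before
                # the clamp to +-448 and the cast to float8_e4m3fn.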
view = torch.ops.aten.reshape.default(arg0_1, [8192, 256, 32])
abs_1 = torch.ops.aten.abs.default(view)
amax = torch.ops.aten.amax.default(abs_1, [-1])
unsqueeze = torch.ops.aten.unsqueeze.default(amax, -1)
view_1 = torch.ops.aten.view.dtype(unsqueeze, torch.int32)
bitwise_right_shift = torch.ops.aten.bitwise_right_shift.Tensor_Scalar(
view_1, 23
)
bitwise_and = torch.ops.aten.bitwise_and.Scalar(
bitwise_right_shift, 255
)
sub = torch.ops.aten.sub.Tensor(bitwise_and, 127)
sub_1 = torch.ops.aten.sub.Tensor(sub, 8)
clamp_min = torch.ops.aten.clamp_min.default(sub_1, -127)
clamp_max = torch.ops.aten.clamp_max.default(clamp_min, 128)
add = torch.ops.aten.add.Tensor(clamp_max, 127)
convert_element_type = torch.ops.prims.convert_element_type.default(
add, torch.uint8
)
isnan = torch.ops.aten.isnan.default(unsqueeze)
scalar_tensor = torch.ops.aten.scalar_tensor.default(
255, dtype=torch.uint8, layout=torch.strided, device="cuda"
)
where = torch.ops.aten.where.self(
isnan, scalar_tensor, convert_element_type
)
convert_element_type_1 = torch.ops.prims.convert_element_type.default(
where, torch.int32
)
bitwise_left_shift = torch.ops.aten.bitwise_left_shift.Tensor_Scalar(
convert_element_type_1, 23
)
view_2 = torch.ops.aten.view.dtype(bitwise_left_shift, torch.float32)
clamp_min_1 = torch.ops.aten.clamp_min.default(
view_2, 1.1754943508222875e-38
)
div = torch.ops.aten.div.Tensor(view, clamp_min_1)
clamp_min_2 = torch.ops.aten.clamp_min.default(div, -448.0)
clamp_max_1 = torch.ops.aten.clamp_max.default(clamp_min_2, 448.0)
convert_element_type_2 = torch.ops.prims.convert_element_type.default(
clamp_max_1, torch.float8_e4m3fn
)
view_3 = torch.ops.aten.reshape.default(
convert_element_type_2, [8192, 8192]
)
convert_element_type_2 = None
view_4 = torch.ops.aten.view.dtype(where, torch.float8_e8m0fnu)
squeeze = torch.ops.aten.squeeze.dim(view_4, -1)
view_5 = torch.ops.aten.reshape.default(arg1_1, [8192, 256, 32])
abs_2 = torch.ops.aten.abs.default(view_5)
amax_1 = torch.ops.aten.amax.default(abs_2, [-1])
unsqueeze_1 = torch.ops.aten.unsqueeze.default(amax_1, -1)
view_6 = torch.ops.aten.view.dtype(unsqueeze_1, torch.int32)
bitwise_right_shift_1 = (
torch.ops.aten.bitwise_right_shift.Tensor_Scalar(view_6, 23)
)
bitwise_and_1 = torch.ops.aten.bitwise_and.Scalar(
bitwise_right_shift_1, 255
)
sub_2 = torch.ops.aten.sub.Tensor(bitwise_and_1, 127)
sub_3 = torch.ops.aten.sub.Tensor(sub_2, 8)
clamp_min_3 = torch.ops.aten.clamp_min.default(sub_3, -127)
clamp_max_2 = torch.ops.aten.clamp_max.default(clamp_min_3, 128)
add_1 = torch.ops.aten.add.Tensor(clamp_max_2, 127)
convert_element_type_3 = torch.ops.prims.convert_element_type.default(
add_1, torch.uint8
)
isnan_1 = torch.ops.aten.isnan.default(unsqueeze_1)
unsqueeze_1 = None
scalar_tensor_1 = torch.ops.aten.scalar_tensor.default(
255, dtype=torch.uint8, layout=torch.strided, device="cuda"
)
where_1 = torch.ops.aten.where.self(
isnan_1, scalar_tensor_1, convert_element_type_3
)
convert_element_type_4 = torch.ops.prims.convert_element_type.default(
where_1, torch.int32
)
bitwise_left_shift_1 = torch.ops.aten.bitwise_left_shift.Tensor_Scalar(
convert_element_type_4, 23
)
convert_element_type_4 = None
view_7 = torch.ops.aten.view.dtype(bitwise_left_shift_1, torch.float32)
bitwise_left_shift_1 = None
clamp_min_4 = torch.ops.aten.clamp_min.default(
view_7, 1.1754943508222875e-38
)
div_1 = torch.ops.aten.div.Tensor(view_5, clamp_min_4)
clamp_min_5 = torch.ops.aten.clamp_min.default(div_1, -448.0)
clamp_max_3 = torch.ops.aten.clamp_max.default(clamp_min_5, 448.0)
convert_element_type_5 = torch.ops.prims.convert_element_type.default(
clamp_max_3, torch.float8_e4m3fn
)
view_8 = torch.ops.aten.reshape.default(
convert_element_type_5, [8192, 8192]
)
view_9 = torch.ops.aten.view.dtype(where_1, torch.float8_e8m0fnu)
squeeze_1 = torch.ops.aten.squeeze.dim(view_9, -1)
permute = torch.ops.aten.permute.default(view_8, [1, 0])
view_13 = torch.ops.aten.reshape.default(squeeze, [64, 128, 64, 4])
permute_2 = torch.ops.aten.permute.default(view_13, [0, 2, 1, 3])
clone = torch.ops.aten.clone.default(
permute_2, memory_format=torch.contiguous_format
)
view_14 = torch.ops.aten.reshape.default(clone, [4096, 4, 32, 4])
permute_3 = torch.ops.aten.permute.default(view_14, [0, 2, 1, 3])
clone_1 = torch.ops.aten.clone.default(
permute_3, memory_format=torch.contiguous_format
)
view_15 = torch.ops.aten.reshape.default(clone_1, [4096, 32, 16])
view_16 = torch.ops.aten.reshape.default(view_15, [2097152])
view_18 = torch.ops.aten.reshape.default(squeeze_1, [64, 128, 64, 4])
permute_5 = torch.ops.aten.permute.default(view_18, [0, 2, 1, 3])
clone_2 = torch.ops.aten.clone.default(
permute_5, memory_format=torch.contiguous_format
)
view_19 = torch.ops.aten.reshape.default(clone_2, [4096, 4, 32, 4])
permute_6 = torch.ops.aten.permute.default(view_19, [0, 2, 1, 3])
clone_3 = torch.ops.aten.clone.default(
permute_6, memory_format=torch.contiguous_format
)
view_20 = torch.ops.aten.reshape.default(clone_3, [4096, 32, 16])
view_21 = torch.ops.aten.reshape.default(view_20, [2097152])
_scaled_mm = torch.ops.test_fp8.fake_scaled_mm.default(
view_3, permute, view_16, view_21, None, None, torch.float32
)
return (_scaled_mm,)
# Run with largest shape
M, K, N = 8192, 8192, 8192
device = "cuda"
A = torch.randn(M, K, dtype=torch.float32, device=device)
B = torch.randn(K, N, dtype=torch.float32, device=device)
f_c = torch.compile(fullgraph=True)(forward)
_, code = run_and_get_code(f_c, A, B)
FileCheck().check(".run(").check(".run(").check("fake_scaled_mm").run(
code[0]
)
for seed in range(5):
input_values.clear()
torch.manual_seed(seed)
# without dividing, outputs get way too large
A = torch.randn(M, K, dtype=torch.float32, device=device)
B = torch.randn(K, N, dtype=torch.float32, device=device)
# Uses fake_scaled_mm custom op (no CUDA 12.8 needed!)
torch._dynamo.reset()
torch.compile(forward)(A, B)
torch._dynamo.reset()
with config.patch({"loop_index_inversion_in_fusion": False}):
torch.compile(forward)(A, B)
assert len(input_values) == 2
for i in range(4):
self.assertEqual(
input_values[0][i],
input_values[1][i],
msg=f"idx {i} seed {seed}",
)
@unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg)
@parametrize("M", (1, 3, 33, 257, 1024))
@parametrize("K", (16, 32, 1024))
@parametrize("N", (16, 2048))
@parametrize(
"persistent_matmul", [False, True] if has_triton_tma_device() else [False]
)
def test_rowwise_scaling_acceptable_input_dims(
self, M: int, K: int, N: int, persistent_matmul: bool
):
dtype: torch.dtype = torch.bfloat16
use_fast_accum = True
device = "cuda"
dtype_float8 = torch.float8_e4m3fn
dtype_float8 = _fix_fp8_dtype_for_rocm(dtype_float8, device)
x = torch.randn(M, K, dtype=dtype, device=device)
w = torch.randn(N, K, dtype=dtype, device=device)
bias = torch.randn(N, device=device, dtype=torch.bfloat16)
w_fp8, w_inverse_scale = _quantize_rowwise(w, dtype_float8)
w_t_fp8 = w_fp8.t()
w_inverse_scale = w_inverse_scale.t() # scale_b should be (1, N)
x_fp8, x_inverse_scale = _quantize_rowwise(x, dtype_float8)
def linear(x_fp8, x_inverse_scale, w_t_fp8, w_inverse_scale, bias):
y = torch._scaled_mm(
x_fp8,
w_t_fp8,
x_inverse_scale,
w_inverse_scale,
bias,
out_dtype=dtype,
use_fast_accum=use_fast_accum,
)
return y
y_eager = linear(
x_fp8,
x_inverse_scale,
w_t_fp8,
w_inverse_scale,
bias,
)
with config.patch({"triton.enable_persistent_tma_matmul": persistent_matmul}):
linear_compiled = torch.compile(
linear, backend="inductor", mode="max-autotune"
)
y_compiled = linear_compiled(
x_fp8,
x_inverse_scale,
w_t_fp8,
w_inverse_scale,
bias,
)
self.assertEqual(y_eager.dtype, dtype)
self.assertEqual(y_compiled.dtype, dtype)
torch.testing.assert_close(y_eager, y_compiled, rtol=1e-2, atol=0.07)
@unittest.skipIf(not PLATFORM_SUPPORTS_MX_GEMM, "Not supported on non B200")
def test_mx_fp8_max_autotune(self):
M, K, N = 128, 32, 128
BLOCK_SIZE = 32
device = "cuda"
dtype = torch.bfloat16
A_ref = torch.eye(M, device=device, dtype=torch.bfloat16)
B_ref = torch.eye(N, device=device, dtype=torch.bfloat16)
A = A_ref.to(torch.float8_e4m3fn)
B = B_ref.to(torch.float8_e4m3fn)
A_scale = torch.full(
(M, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=torch.float8_e8m0fnu
)
B_scale = torch.full(
(N, ceil_div(K, BLOCK_SIZE)), 1.0, device=device, dtype=torch.float8_e8m0fnu
)
A_scale = to_blocked(A_scale)
B_scale = to_blocked(B_scale)
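        # `to_blocked` rearranges the per-32-element e8m0 scales into the tiled,
        # swizzled layout the MX GEMM path expects.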
def linear(A, B, A_scale, B_scale):
y = torch._scaled_mm(
A,
B.t(),
A_scale,
B_scale,
out_dtype=torch.bfloat16,
use_fast_accum=False,
)
return y
y_eager = linear(A, B, A_scale, B_scale)
linear_compiled = torch.compile(linear, backend="inductor", mode="max-autotune")
y_compiled = linear_compiled(A, B, A_scale, B_scale)
self.assertEqual(y_eager.dtype, dtype)
self.assertEqual(y_compiled.dtype, dtype)
torch.testing.assert_close(y_eager, y_compiled, rtol=1e-2, atol=0.07)
@unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg)
def test_unacceptable_input_dims(self):
# for compiled ops, type checking is in torch/_meta_registrations.py
dtype: torch.dtype = torch.bfloat16
device = "cuda"
dtype_float8 = torch.float8_e4m3fn
dtype_float8 = _fix_fp8_dtype_for_rocm(dtype_float8, device)
M, K, N = 64, 15, 2048 # K needs to be a multiple of 16
x = torch.randn(M, K, dtype=dtype, device=device)
w = torch.randn(N, K, dtype=dtype, device=device)
bias = torch.randn(N, device=device, dtype=torch.bfloat16)
w_fp8, w_inverse_scale = _quantize_tensorwise(w, dtype_float8)
w_t_fp8 = w_fp8.t()
def linear(x, w_t_fp8, w_inverse_scale, bias):
x_fp8, x_inverse_scale = _quantize_tensorwise(x, dtype_float8)
y = torch._scaled_mm(
x_fp8,
w_t_fp8,
x_inverse_scale,
w_inverse_scale,
bias,
out_dtype=dtype,
use_fast_accum=True,
)
return y
linear_compiled = torch.compile(linear, backend="inductor", mode="max-autotune")
with self.assertRaises(torch._dynamo.exc.TorchRuntimeError) as cm:
linear_compiled(
x,
w_t_fp8,
w_inverse_scale,
bias,
)
self.assertTrue(
f"Expected self.size(1) to be divisible by 16, but got self.size(1)={K}"
in str(cm.exception)
)
@unittest.skipIf(not PLATFORM_SUPPORTS_FP8, f8_msg)
def test_unacceptable_scale_dims_rowwise_scaling(self):
dtype: torch.dtype = torch.bfloat16
device = "cuda"
dtype_float8 = torch.float8_e4m3fn
dtype_float8 = _fix_fp8_dtype_for_rocm(dtype_float8, device)
M, K, N = 233, 32, 128
x = torch.randn(M, K, dtype=dtype, device=device)
w = torch.randn(N, K, dtype=dtype, device=device)
bias = torch.randn(N, device=device, dtype=torch.bfloat16)
w_fp8, w_inverse_scale = _quantize_rowwise(w, dtype_float8)
w_t_fp8 = w_fp8.t()
def linear(x, w_t_fp8, w_inverse_scale, bias):
x_fp8, x_inverse_scale = _quantize_rowwise(x, dtype_float8)
y = torch._scaled_mm(
x_fp8,
w_t_fp8,
w_inverse_scale.t(), # testing with w and x scales switched
x_inverse_scale,
bias,
out_dtype=dtype,
use_fast_accum=True,
)
return y
linear_compiled = torch.compile(linear, backend="inductor", mode="max-autotune")
with self.assertRaises(torch._dynamo.exc.TorchRuntimeError) as cm:
linear_compiled(
x,
w_t_fp8,
w_inverse_scale,
bias,
)
self.assertTrue("Invalid scaling configuration." in str(cm.exception))
if __name__ == "__main__":
if HAS_CUDA_AND_TRITON or HAS_CPU:
run_tests()
| TestFP8Lowering |
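For orientation, the `_quantize_tensorwise` helper used throughout the tests above is defined earlier in that file; conceptually it maps the tensor's absolute maximum onto the fp8 maximum and returns the fp8 payload plus the dequantization ("inverse") scale. A minimal sketch under that assumption, not the file's exact code:

import torch

def quantize_tensorwise_sketch(t, float8_dtype=torch.float8_e4m3fn):
    # One scale for the whole tensor: amax / fp8_max.
    fp8_max = torch.finfo(float8_dtype).max
    inverse_scale = t.abs().amax().clamp(min=1e-12) / fp8_max
    t_fp8 = (t / inverse_scale).clamp(-fp8_max, fp8_max).to(float8_dtype)
    return t_fp8, inverse_scale.float()  # inverse scale is passed to _scaled_mm as float32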
python | tensorflow__tensorflow | tensorflow/python/module/module_test.py | {
"start": 13161,
"end": 13296
} | class ____(AbstractModule):
@module.Module.with_name_scope
def __call__(self, x):
return x ** 2, get_name_scope()
| ConcreteModule |
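For context, `module.Module.with_name_scope` in the row above is TensorFlow's public `tf.Module.with_name_scope` decorator; a minimal standalone analogue (standard TensorFlow API, `Squarer` is illustrative):

import tensorflow as tf

class Squarer(tf.Module):
    @tf.Module.with_name_scope
    def __call__(self, x):
        # Ops created here are placed under this module's name scope.
        return x ** 2

y = Squarer(name="squarer")(tf.constant(3.0))  # -> 9.0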
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/build_systems/bundle.py | {
"start": 215,
"end": 466
} | class ____(PackageBase):
"""General purpose bundle, or no-code, package class."""
build_system_class = "BundlePackage"
default_buildsystem = "bundle"
has_code = False
build_system("bundle")
@register_builder("bundle")
| BundlePackage |
python | catalyst-team__catalyst | catalyst/contrib/data/reader.py | {
"start": 3088,
"end": 4442
} | class ____(IReader):
"""
    Reader abstraction with a lambda encoder.
    Reads an element from the dataset and applies ``lambda_fn`` to it.
"""
def __init__(
self,
input_key: str,
output_key: Optional[str] = None,
lambda_fn: Optional[Callable] = None,
**kwargs,
):
"""
Args:
input_key: input key to use from annotation dict
output_key: output key to use to store the result
            lambda_fn: encode function used to prepare your data
                (for example, converting chars/words/tokens to indices)
kwargs: kwargs for encode function
"""
super().__init__(input_key, output_key)
lambda_fn = lambda_fn or (lambda x: x)
self.lambda_fn = functools.partial(lambda_fn, **kwargs)
def __call__(self, element):
"""
Reads a row from your annotations dict
        and applies ``lambda_fn`` to it.
Args:
element: elem in your dataset.
Returns:
            Value after applying ``lambda_fn``
"""
if self.input_key is not None:
element = element[self.input_key]
output = self.lambda_fn(element)
if self.output_key is not None:
output = {self.output_key: output}
return output
| LambdaReader |
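A quick usage sketch for the reader above (key names are illustrative):

reader = LambdaReader(
    input_key="text",
    output_key="tokens",
    lambda_fn=lambda s, sep: s.split(sep),
    sep=" ",
)
reader({"text": "hello world"})  # -> {"tokens": ["hello", "world"]}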
python | dagster-io__dagster | python_modules/dagster/dagster/_core/snap/node.py | {
"start": 3309,
"end": 4565
} | class ____:
mapped_node_name: str
mapped_input_name: str
external_input_name: str
def build_input_mapping_snap(input_mapping: InputMapping) -> InputMappingSnap:
return InputMappingSnap(
mapped_node_name=input_mapping.maps_to.node_name,
mapped_input_name=input_mapping.maps_to.input_name,
external_input_name=input_mapping.graph_input_name,
)
def build_input_def_snap(input_def: InputDefinition) -> InputDefSnap:
check.inst_param(input_def, "input_def", InputDefinition)
return InputDefSnap(
name=input_def.name,
dagster_type_key=input_def.dagster_type.key,
description=input_def.description,
metadata=input_def.metadata,
)
def build_output_def_snap(output_def: OutputDefinition) -> OutputDefSnap:
check.inst_param(output_def, "output_def", OutputDefinition)
return OutputDefSnap(
name=output_def.name,
dagster_type_key=output_def.dagster_type.key,
description=output_def.description,
is_required=output_def.is_required,
metadata=output_def.metadata,
is_dynamic=output_def.is_dynamic,
)
@whitelist_for_serdes(storage_name="CompositeSolidDefSnap", skip_when_empty_fields={"pools"})
@record
| InputMappingSnap |
python | donnemartin__interactive-coding-challenges | graphs_trees/graph/graph.py | {
"start": 1148,
"end": 2079
} | class ____:
def __init__(self):
self.nodes = {} # Key = key, val = Node
def add_node(self, key):
if key is None:
raise TypeError('key cannot be None')
if key not in self.nodes:
self.nodes[key] = Node(key)
return self.nodes[key]
def add_edge(self, source_key, dest_key, weight=0):
if source_key is None or dest_key is None:
raise KeyError('Invalid key')
if source_key not in self.nodes:
self.add_node(source_key)
if dest_key not in self.nodes:
self.add_node(dest_key)
self.nodes[source_key].add_neighbor(self.nodes[dest_key], weight)
def add_undirected_edge(self, src_key, dst_key, weight=0):
if src_key is None or dst_key is None:
raise TypeError('key cannot be None')
self.add_edge(src_key, dst_key, weight)
self.add_edge(dst_key, src_key, weight)
| Graph |
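A minimal usage sketch for the graph above (assuming the companion `Node` class from the same module, whose `add_neighbor` records edges):

g = Graph()
g.add_edge('a', 'b', weight=5)   # creates both nodes on demand
g.add_undirected_edge('b', 'c')  # adds b->c and c->b
assert set(g.nodes) == {'a', 'b', 'c'}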
python | numba__numba | numba/cuda/types.py | {
"start": 31,
"end": 207
} | class ____(types.Type):
"""
A 3-tuple (x, y, z) representing the position of a block or thread.
"""
def __init__(self):
super().__init__(name='Dim3')
| Dim3 |
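For context, `Dim3` is the type Numba gives the familiar CUDA index attributes; a minimal kernel that consumes them (standard `numba.cuda` API):

from numba import cuda

@cuda.jit
def scale(out, x, alpha):
    # cuda.threadIdx / cuda.blockIdx / cuda.blockDim are Dim3-typed values.
    i = cuda.grid(1)
    if i < out.size:
        out[i] = alpha * x[i]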
python | pytorch__pytorch | torch/_tensor.py | {
"start": 3333,
"end": 76306
} | class ____(torch._C.TensorBase):
_is_param: bool
def _clear_non_serializable_cached_data(self):
r"""Clears any data cached in the tensor's ``__dict__`` that would prevent the tensor
from being serialized.
For example, subclasses with custom dispatched sizes / strides cache this info in
non-serializable PyCapsules within the ``__dict__``, and this must be cleared out for
serialization to function.
Any subclass that overrides this MUST call ``super()._clear_non_serializable_cached_data().``
Additional data cleared within the override must be able to be re-cached transparently
to avoid breaking subclass functionality.
"""
if has_torch_function_unary(self):
return handle_torch_function(
Tensor._clear_non_serializable_cached_data, (self,), self
)
# NB: Wrapper subclasses that implement custom-dispatched sizes / strides cache
# this info via non-serializable PyCapsules.
CACHED_SIZES_STRIDES_KEYS = [
"_sym_sizes_capsule",
"_sym_sizes_capsule_len",
"_sym_strides_capsule",
"_sym_strides_capsule_len",
]
for key in CACHED_SIZES_STRIDES_KEYS:
self.__dict__.pop(key, None)
def __deepcopy__(self, memo):
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__deepcopy__, (self,), self, memo)
if not self.is_leaf:
raise RuntimeError(
"Only Tensors created explicitly by the user "
"(graph leaves) support the deepcopy protocol at the moment. "
"If you were attempting to deepcopy a module, this may be because "
"of a torch.nn.utils.weight_norm usage, "
"see https://github.com/pytorch/pytorch/pull/103001"
)
if id(self) in memo:
return memo[id(self)]
with torch.no_grad():
# TODO: skipping storage copy is wrong for meta, as meta
# does accurate alias tracking; however, the code below
# doesn't work because of
# https://github.com/pytorch/pytorch/issues/47442
# Update the test in test_serialization if you remove 'meta' from here
if (
self.is_sparse
or self.device.type
in ["lazy", "xla", "mtia", "mps", "maia", "meta", "ipu"]
or (
not torch._C._has_storage(self)
and self.device.type == torch._C._get_privateuse1_backend_name()
)
or (type(self) is not Tensor and self.data_ptr() == 0)
):
new_tensor = self.clone()
if type(new_tensor) is not type(self):
raise RuntimeError(
"The default implementation of __deepcopy__() for wrapper subclasses "
"only works for subclass types that implement clone() and for which "
"cloning returns another instance of the same subclass. You should either "
"properly implement clone() for your subclass or override __deepcopy__() "
"if it is intended behavior for clone() to return an instance of a "
"different type."
)
else:
new_storage = self._typed_storage()._deepcopy(memo)
if self.is_quantized:
# quantizer_params can be different type based on torch attribute
quantizer_params: Union[
tuple[torch.qscheme, float, int],
tuple[torch.qscheme, Tensor, Tensor, int],
]
if self.qscheme() == torch.per_tensor_affine:
quantizer_params = (
self.qscheme(),
self.q_scale(),
self.q_zero_point(),
)
elif self.qscheme() in (
torch.per_channel_affine,
torch.per_channel_affine_float_qparams,
):
quantizer_params = (
self.qscheme(),
self.q_per_channel_scales(),
self.q_per_channel_zero_points(),
self.q_per_channel_axis(),
)
else:
raise RuntimeError(
f"Unsupported qscheme {self.qscheme()} in deepcopy"
)
# TODO: Once we decide to break serialization FC, no longer
# need to wrap with TypedStorage
new_tensor = torch._utils._rebuild_qtensor(
torch.storage.TypedStorage(
wrap_storage=new_storage._untyped_storage,
dtype=self.dtype,
_internal=True,
),
self.storage_offset(),
self.size(),
self.stride(),
quantizer_params,
self.requires_grad,
self._backward_hooks,
)
if type(new_tensor) is not type(self):
raise RuntimeError(
"The default implementation of __deepcopy__() for quantized tensors "
"expects the tensor returned by torch._utils._rebuild_qtensor() to "
"match the type of the instance being copied. If you encounter this, "
"please open an issue on PyTorch's GitHub."
)
else:
new_tensor = self.new_empty([])
if type(new_tensor) is not type(self):
raise RuntimeError(
"The default implementation of __deepcopy__() for non-wrapper subclasses "
"only works for subclass types that implement new_empty() and for which "
"that function returns another instance of the same subclass. You should "
"either properly implement new_empty() for your subclass or override "
"__deepcopy__() if it is intended behavior for new_empty() to return "
"an instance of a different type."
)
new_tensor.set_(
new_storage, self.storage_offset(), self.size(), self.stride()
)
if self.is_conj():
new_tensor = new_tensor.conj_physical()
if self.is_neg():
new_tensor = new_tensor.neg()
if self.requires_grad:
new_tensor.requires_grad_()
if self.grad is not None:
new_tensor.grad = self.grad.__deepcopy__(memo)
if type(self) is not Tensor:
if type(new_tensor) is not type(self):
raise RuntimeError(
"Type of deepcopy result does not match the type of the source tensor. "
"If you encounter this, please open an issue on PyTorch's GitHub."
)
# Plain Tensors don't have slots
slots_to_save = copyreg._slotnames(self.__class__) # type: ignore[attr-defined]
for slot in slots_to_save:
if hasattr(self, slot):
setattr(new_tensor, slot, deepcopy(getattr(self, slot), memo))
# don't try to deepcopy non-serializable cached data
self._clear_non_serializable_cached_data()
new_tensor.__dict__ = deepcopy(self.__dict__, memo)
memo[id(self)] = new_tensor
return new_tensor
def __reduce_ex__(self, proto):
materialize_fake_tensors = (
torch.serialization._serialization_tls.materialize_fake_tensors
)
state = torch._utils._get_obj_state(self)
# Ignore all state when using FakeTensor with skip_data(materialize_fake_tensors) because FakeTensor has
# some state that cannot be pickled
if (
# TODO: remove hasattr, it's a hack to support versions of torch that
# don't have _subclasses
hasattr(torch, "_subclasses")
and type(self) is torch._subclasses.fake_tensor.FakeTensor
and materialize_fake_tensors
) or (type(self) is Tensor and not state):
# Fast path for regular tensor without Python state.
return self._reduce_ex_internal(proto)
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__reduce_ex__, (self,), self, proto)
func, args = self._reduce_ex_internal(proto)
# sizes / strides cache needs to be cleared here because it'll just be re-cached
# if cleared earlier. Note that state references the -actual- tensor dict.
self._clear_non_serializable_cached_data()
return (_rebuild_from_type_v2, (func, type(self), args, state))
def storage(self):
r"""
storage() -> torch.TypedStorage
Returns the underlying :class:`TypedStorage`.
.. warning::
:class:`TypedStorage` is deprecated. It will be removed in the future, and
:class:`UntypedStorage` will be the only storage class. To access the
:class:`UntypedStorage` directly, use :attr:`Tensor.untyped_storage()`.
"""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.storage, (self,), self)
torch.storage._warn_typed_storage_removal(stacklevel=2)
return self._typed_storage()
# For internal use only, to avoid raising deprecation warning
def _typed_storage(self):
untyped_storage = self.untyped_storage()
return torch.TypedStorage(
wrap_storage=untyped_storage, dtype=self.dtype, _internal=True
)
def _reduce_ex_internal(self, proto):
check_serializing_named_tensor(self)
from torch.utils.hooks import warn_if_has_hooks
# See Note [Don't serialize hooks]
warn_if_has_hooks(self)
backward_hooks: dict[Any, Any] = OrderedDict()
skip_data = torch.serialization._serialization_tls.skip_data
materialize_fake_tensors = (
torch.serialization._serialization_tls.materialize_fake_tensors
)
if self.device.type in ["xla", "maia", "mtia"] or (
not torch._C._has_storage(self)
and self.device.type == torch._C._get_privateuse1_backend_name()
):
if skip_data:
raise RuntimeError(
"Cannot serialize tensors on backends with no storage under skip_data context manager"
)
cpu_tensor = self.cpu()
return (
torch._utils._rebuild_device_tensor_from_cpu_tensor,
(cpu_tensor, self.dtype, str(self.device), self.requires_grad),
)
if self.device.type == "meta":
# NB: This implementation BREAKS storage sharing. Current
# hypothesis is that no one cares for meta tensors.
if skip_data:
warnings.warn(
"Serializing tensors on the meta device under skip_data context manager is a no-op",
stacklevel=2,
)
arg_meta = (
self.dtype,
tuple(self.size()),
self.stride(),
self.requires_grad,
)
return (torch._utils._rebuild_meta_tensor_no_storage, arg_meta)
if self.is_quantized:
if skip_data:
raise RuntimeError(
"Cannot serialize qtensor under skip_data context manager, file an issue if you need this feature"
)
# quantizer_params can be different type based on torch attribute
quantizer_params: Union[
tuple[torch.qscheme, float, int], tuple[Any, Tensor, Tensor, int]
]
if self.qscheme() == torch.per_tensor_affine:
quantizer_params = (
torch.per_tensor_affine,
self.q_scale(),
self.q_zero_point(),
)
elif self.qscheme() in (
torch.per_channel_affine,
torch.per_channel_affine_float_qparams,
):
# convert scales and zero points to tuple to avoid recursive calls
# when/if we get multi-axis quantized tensors in the future, the shape
# is recoverable from the main tensor shape
quantizer_params = (
torch.per_channel_affine,
self.q_per_channel_scales(),
self.q_per_channel_zero_points(),
self.q_per_channel_axis(),
)
else:
raise RuntimeError(
f"Serialization is not supported for tensors of type {self.qscheme()}"
)
# TODO: Once we decide to break serialization FC, no longer
# need to wrap with TypedStorage
args_qtensor = (
torch.storage.TypedStorage(
wrap_storage=self._typed_storage()._untyped_storage,
dtype=self.dtype,
_internal=True,
),
self.storage_offset(),
tuple(self.size()),
self.stride(),
quantizer_params,
self.requires_grad,
backward_hooks,
)
return (torch._utils._rebuild_qtensor, args_qtensor)
elif self.is_sparse:
if self.layout == torch.sparse_coo:
args_sparse = (
self.layout,
(self._indices(), self._values(), self.size(), self.is_coalesced()),
)
else:
raise NotImplementedError(
f"sparse tensor __reduce_ex__ for layout `{self.layout}`"
)
return (torch._utils._rebuild_sparse_tensor, args_sparse)
elif self.layout in {
torch.sparse_csr,
torch.sparse_csc,
torch.sparse_bsr,
torch.sparse_bsc,
}:
if self.layout in {torch.sparse_csr, torch.sparse_bsr}:
compressed_indices, plain_indices = (
self.crow_indices(),
self.col_indices(),
)
else:
compressed_indices, plain_indices = (
self.ccol_indices(),
self.row_indices(),
)
args_sparse_compressed = (
self.layout,
(
compressed_indices,
plain_indices,
self.values(),
self.size(),
),
)
return (torch._utils._rebuild_sparse_tensor, args_sparse_compressed)
elif self.is_nested:
if skip_data:
raise RuntimeError(
"Cannot serialize nested tensor under skip_data context manager, file an issue if you need this feature"
)
args_nested = (
# NB: values() currently returns the storage as a buffer in an unsafe way.
# Ideally, we'd use a private API for this instead. TODO: Switch to this if
# we ever get around to adding it.
self.values(),
self._nested_tensor_size(),
self._nested_tensor_strides(),
self._nested_tensor_storage_offsets(),
)
return (torch._utils._rebuild_nested_tensor, args_nested)
elif (
type(self) is not torch.Tensor
and type(self).__torch_dispatch__ is not torch.Tensor.__torch_dispatch__
and (
isinstance(self, torch._subclasses.functional_tensor.FunctionalTensor)
or (
not isinstance(self, torch._subclasses.fake_tensor.FakeTensor)
and self.data_ptr() == 0
)
)
):
arg_wrapper_subclass = (
type(self),
self.dtype,
tuple(self.size()),
self.stride(),
self.storage_offset(),
self.layout,
self.device,
self.requires_grad,
)
return (torch._utils._rebuild_wrapper_subclass, arg_wrapper_subclass)
elif (
type(self) is not torch.Tensor
and type(self).__torch_dispatch__ is not torch.Tensor.__torch_dispatch__
and (
isinstance(self, torch._subclasses.fake_tensor.FakeTensor)
and not (skip_data and materialize_fake_tensors)
)
):
arg_wrapper_subclass = (
type(self),
self.dtype,
tuple(self.size()),
self.stride(),
self.storage_offset(),
self.layout,
self.device,
self.requires_grad,
)
return (torch._utils._rebuild_wrapper_subclass, arg_wrapper_subclass)
else:
v3_dtypes = torch.storage._new_dtypes()
if self.dtype in v3_dtypes:
rebuild_func = torch._utils._rebuild_tensor_v3
storage = self.untyped_storage()
else:
# TODO: Once we decide to break serialization FC, no longer
# need to wrap with TypedStorage
rebuild_func = torch._utils._rebuild_tensor_v2 # type: ignore[assignment]
storage = torch.storage.TypedStorage(
wrap_storage=self._typed_storage()._untyped_storage,
dtype=self.dtype,
_internal=True,
) # type: ignore[assignment]
# TODO: remove hasattr, it's a hack to support versions of torch that
# don't have _subclasses
if (
hasattr(torch, "_subclasses")
and isinstance(self, torch._subclasses.fake_tensor.FakeTensor)
and skip_data
):
storage._fake_device = self.device
args = (
storage,
self.storage_offset(),
tuple(self.size()),
self.stride(),
self.requires_grad,
backward_hooks,
) # previously was self._backward_hooks
if isinstance(storage, torch.storage.UntypedStorage):
args = args + (self.dtype,) # type: ignore[assignment]
metadata = torch._utils.get_tensor_metadata(self)
if metadata:
args = args + (metadata,) # type: ignore[assignment]
return (rebuild_func, args)
def __setstate__(self, state):
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__setstate__, (self,), self, state)
# Warning: this method is NOT called when you torch.load() a tensor;
# that is managed by _rebuild_tensor_v2
if not self.is_leaf:
raise RuntimeError("__setstate__ can be only called on leaf Tensors")
if len(state) == 4:
# legacy serialization of Tensor
# pyrefly: ignore [not-iterable]
self.set_(*state)
return
elif len(state) == 5:
# legacy serialization of Variable
self.data = state[0]
state = (state[3], state[4], state[2])
# The setting of _backward_hooks is expected to be a no-op.
# See Note [Don't serialize hooks]
self.requires_grad, _, self._backward_hooks = state
def __repr__(self, *, tensor_contents=None):
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.__repr__, (self,), self, tensor_contents=tensor_contents
)
# All strings are unicode in Python 3.
return torch._tensor_str._str(self, tensor_contents=tensor_contents)
def backward(
self, gradient=None, retain_graph=None, create_graph=False, inputs=None
):
r"""Computes the gradient of current tensor wrt graph leaves.
The graph is differentiated using the chain rule. If the tensor is
non-scalar (i.e. its data has more than one element) and requires
gradient, the function additionally requires specifying a ``gradient``.
It should be a tensor of matching type and shape, that represents
the gradient of the differentiated function w.r.t. ``self``.
This function accumulates gradients in the leaves - you might need to zero
``.grad`` attributes or set them to ``None`` before calling it.
See :ref:`Default gradient layouts<default-grad-layouts>`
for details on the memory layout of accumulated gradients.
.. note::
If you run any forward ops, create ``gradient``, and/or call ``backward``
in a user-specified CUDA stream context, see
:ref:`Stream semantics of backward passes<bwd-cuda-stream-semantics>`.
.. note::
When ``inputs`` are provided and a given input is not a leaf,
the current implementation will call its grad_fn (though it is not strictly needed to get this gradients).
It is an implementation detail on which the user should not rely.
See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.
Args:
gradient (Tensor, optional): The gradient of the function
being differentiated w.r.t. ``self``.
This argument can be omitted if ``self`` is a scalar. Defaults to ``None``.
retain_graph (bool, optional): If ``False``, the graph used to compute the grads will be freed;
If ``True``, it will be retained. The default is ``None``, in which case the value is inferred from ``create_graph``
(i.e., the graph is retained only when higher-order derivative tracking is requested). Note that in nearly all cases
setting this option to True is not needed and often can be worked around in a much more efficient way.
create_graph (bool, optional): If ``True``, graph of the derivative will
be constructed, allowing to compute higher order derivative
products. Defaults to ``False``.
inputs (Sequence[Tensor], optional): Inputs w.r.t. which the gradient will be
accumulated into ``.grad``. All other tensors will be ignored. If not
provided, the gradient is accumulated into all the leaf Tensors that were
used to compute the :attr:`tensors`. Defaults to ``None``.
"""
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.backward,
(self,),
self,
gradient=gradient,
retain_graph=retain_graph,
create_graph=create_graph,
inputs=inputs,
)
torch.autograd.backward(
self, gradient, retain_graph, create_graph, inputs=inputs
)
def index(self, positions, dims):
"""
Index a regular tensor by binding specified positions to dims.
This converts a regular tensor to a first-class tensor by binding
the specified positional dimensions to Dim objects.
Args:
positions: Tuple of dimension positions to bind
dims: Dim objects or tuple of Dim objects to bind to
Returns:
First-class tensor with specified dimensions bound
"""
# TODO: make it possible to dispatch on positions/dims
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.index,
(self,),
self,
positions,
dims,
)
from functorch.dim import index
return index(self, positions, dims)
def register_hook(self, hook):
r"""Registers a backward hook.
The hook will be called every time a gradient with respect to the
Tensor is computed. The hook should have the following signature::
hook(grad) -> Tensor or None
The hook should not modify its argument, but it can optionally return
a new gradient which will be used in place of :attr:`grad`.
This function returns a handle with a method ``handle.remove()``
that removes the hook from the module.
.. note::
See :ref:`backward-hooks-execution` for more information on how when this hook
is executed, and how its execution is ordered relative to other hooks.
Example::
>>> v = torch.tensor([0., 0., 0.], requires_grad=True)
>>> h = v.register_hook(lambda grad: grad * 2) # double the gradient
>>> v.backward(torch.tensor([1., 2., 3.]))
>>> v.grad
2
4
6
[torch.FloatTensor of size (3,)]
>>> h.remove() # removes the hook
"""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.register_hook, (self,), self, hook)
if not self.requires_grad:
raise RuntimeError(
"cannot register a hook on a tensor that doesn't require gradient"
)
if self._backward_hooks is None:
self._backward_hooks = OrderedDict()
if self.grad_fn is not None:
self.grad_fn._register_hook_dict(self)
from torch.utils.hooks import RemovableHandle
handle = RemovableHandle(self._backward_hooks)
self._backward_hooks[handle.id] = hook
return handle
def register_post_accumulate_grad_hook(self, hook):
r"""Registers a backward hook that runs after grad accumulation.
The hook will be called after all gradients for a tensor have been accumulated,
meaning that the .grad field has been updated on that tensor. The post
accumulate grad hook is ONLY applicable for leaf tensors (tensors without a
.grad_fn field). Registering this hook on a non-leaf tensor will error!
The hook should have the following signature::
hook(param: Tensor) -> None
Note that, unlike other autograd hooks, this hook operates on the tensor
that requires grad and not the grad itself. The hook can in-place modify
and access its Tensor argument, including its .grad field.
This function returns a handle with a method ``handle.remove()``
that removes the hook from the module.
.. note::
See :ref:`backward-hooks-execution` for more information on how when this hook
is executed, and how its execution is ordered relative to other hooks. Since
this hook runs during the backward pass, it will run in no_grad mode (unless
create_graph is True). You can use torch.enable_grad() to re-enable autograd
within the hook if you need it.
Example::
>>> v = torch.tensor([0., 0., 0.], requires_grad=True)
>>> lr = 0.01
>>> # simulate a simple SGD update
>>> h = v.register_post_accumulate_grad_hook(lambda p: p.add_(p.grad, alpha=-lr))
>>> v.backward(torch.tensor([1., 2., 3.]))
>>> v
tensor([-0.0100, -0.0200, -0.0300], requires_grad=True)
>>> h.remove() # removes the hook
"""
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.register_post_accumulate_grad_hook, (self,), self, hook
)
if not self.requires_grad:
raise RuntimeError(
"cannot register a hook on a tensor that doesn't require gradient"
)
if self.grad_fn is not None:
raise RuntimeError(
"post accumulate grad hooks cannot be registered on non-leaf tensors"
)
if self._post_accumulate_grad_hooks is None:
self._post_accumulate_grad_hooks: dict[Any, Any] = (
# pyrefly: ignore [bad-assignment]
OrderedDict()
)
from torch.utils.hooks import RemovableHandle
handle = RemovableHandle(self._post_accumulate_grad_hooks)
self._post_accumulate_grad_hooks[handle.id] = hook
return handle
def reinforce(self, reward):
def trim(str):
return "\n".join([line.strip() for line in str.split("\n")])
raise RuntimeError(
trim(
r"""reinforce() was removed.
Use torch.distributions instead.
See https://pytorch.org/docs/main/distributions.html
Instead of:
probs = policy_network(state)
action = probs.multinomial()
next_state, reward = env.step(action)
action.reinforce(reward)
action.backward()
Use:
probs = policy_network(state)
# NOTE: categorical is equivalent to what used to be called multinomial
m = torch.distributions.Categorical(probs)
action = m.sample()
next_state, reward = env.step(action)
loss = -m.log_prob(action) * reward
loss.backward()
"""
)
)
detach = _C._add_docstr(
_C.TensorBase.detach,
r"""
Returns a new Tensor, detached from the current graph.
The result will never require gradient.
This method also affects forward mode AD gradients and the result will never
have forward mode AD gradients.
.. note::
Returned Tensor shares the same storage with the original one.
In-place modifications on either of them will be seen, and may trigger
errors in correctness checks.
""",
)
detach_ = _C._add_docstr(
_C.TensorBase.detach_,
r"""
Detaches the Tensor from the graph that created it, making it a leaf.
Views cannot be detached in-place.
This method also affects forward mode AD gradients and the result will never
have forward mode AD gradients.
""",
)
def is_shared(self):
r"""Checks if tensor is in shared memory.
This is always ``True`` for CUDA tensors.
"""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.is_shared, (self,), self)
return self._typed_storage()._is_shared()
def share_memory_(self):
r"""Moves the underlying storage to shared memory.
This is a no-op if the underlying storage is already in shared memory
and for CUDA tensors. Tensors in shared memory cannot be resized.
See :meth:`torch.UntypedStorage.share_memory_` for more details.
"""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.share_memory_, (self,), self)
self._typed_storage()._share_memory_()
return self
def module_load(self, other, assign=False):
r"""Defines how to transform ``other`` when loading it into ``self`` in :meth:`~nn.Module.load_state_dict`.
Used when :func:`~torch.__future__.get_swap_module_params_on_conversion` is ``True``.
It is expected that ``self`` is a parameter or buffer in an ``nn.Module`` and ``other`` is the
        value in the state dictionary with the corresponding key; this method defines
how ``other`` is remapped before being swapped with ``self`` via
:func:`~torch.utils.swap_tensors` in :meth:`~nn.Module.load_state_dict`.
.. note::
This method should always return a new object that is not ``self`` or ``other``.
For example, the default implementation returns ``self.copy_(other).detach()``
if ``assign`` is ``False`` or ``other.detach()`` if ``assign`` is ``True``.
Args:
other (Tensor): value in state dict with key corresponding to ``self``
assign (bool): the assign argument passed to :meth:`nn.Module.load_state_dict`
"""
if has_torch_function_variadic(self, other):
return handle_torch_function(
Tensor.module_load, (self, other), self, other, assign=assign
)
if assign:
return other.detach()
else:
return self.copy_(other).detach()
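    # Illustrative flow (assuming swap_module_params_on_conversion is enabled):
    # nn.Module.load_state_dict computes
    #   new_param = param.module_load(state_dict[key], assign=assign)
    # and then swaps it in via torch.utils.swap_tensors(param, new_param).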
def __reversed__(self):
r"""Reverses the tensor along dimension 0."""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__reversed__, (self,), self)
if self.dim() == 0:
return self
else:
return self.flip(0)
def norm(
self,
p: Optional[Union[float, str]] = "fro",
dim=None,
keepdim=False,
dtype=None,
):
r"""See :func:`torch.norm`"""
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.norm, (self,), self, p=p, dim=dim, keepdim=keepdim, dtype=dtype
)
return torch.norm(self, p, dim, keepdim, dtype=dtype)
def solve(self, other):
from torch._linalg_utils import solve
return solve(self, other)
def lstsq(self, other):
from torch._linalg_utils import lstsq
return lstsq(self, other)
def eig(self, eigenvectors=False):
from torch._linalg_utils import eig
return eig(self, eigenvectors=eigenvectors)
def symeig(self, eigenvectors=False):
from torch._linalg_utils import _symeig
return _symeig(self, eigenvectors=eigenvectors)
def lu(self, pivot=True, get_infos=False):
r"""See :func:`torch.lu`"""
# If get_infos is True, then we don't need to check for errors and vice versa
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.lu, (self,), self, pivot=pivot, get_infos=get_infos
)
LU, pivots, infos = torch._lu_with_info(
self, pivot=pivot, check_errors=(not get_infos)
)
if get_infos:
return LU, pivots, infos
else:
return LU, pivots
def stft(
self,
n_fft: int,
hop_length: Optional[int] = None,
win_length: Optional[int] = None,
window: "Optional[Tensor]" = None,
center: bool = True,
pad_mode: str = "reflect",
normalized: bool = False,
onesided: Optional[bool] = None,
return_complex: Optional[bool] = None,
align_to_window: Optional[bool] = None,
):
r"""See :func:`torch.stft`
.. warning::
This function changed signature at version 0.4.1. Calling with
the previous signature may cause error or return incorrect result.
"""
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.stft,
(self,),
self,
n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=center,
pad_mode=pad_mode,
normalized=normalized,
onesided=onesided,
return_complex=return_complex,
align_to_window=align_to_window,
)
return torch.stft(
self,
n_fft,
hop_length,
win_length,
window,
center,
pad_mode,
normalized,
onesided,
return_complex=return_complex,
align_to_window=align_to_window,
)
def istft(
self,
n_fft: int,
hop_length: Optional[int] = None,
win_length: Optional[int] = None,
window: "Optional[Tensor]" = None,
center: bool = True,
normalized: bool = False,
onesided: Optional[bool] = None,
length: Optional[int] = None,
return_complex: bool = False,
):
r"""See :func:`torch.istft`"""
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.istft,
(self,),
self,
n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=center,
normalized=normalized,
onesided=onesided,
length=length,
return_complex=return_complex,
)
return torch.istft(
self,
n_fft,
hop_length,
win_length,
window,
center,
normalized,
onesided,
length,
return_complex=return_complex,
)
def resize(self, *sizes):
if has_torch_function_unary(self):
return handle_torch_function(Tensor.resize, (self,), self, *sizes)
warnings.warn("non-inplace resize is deprecated", stacklevel=2)
from torch.autograd._functions import Resize
return Resize.apply(self, sizes)
def resize_as(self, tensor):
if has_torch_function_variadic(self, tensor):
return handle_torch_function(Tensor.resize_as, (self, tensor), self, tensor)
warnings.warn("non-inplace resize_as is deprecated", stacklevel=2)
from torch.autograd._functions import Resize
return Resize.apply(self, tensor.size())
def split(self, split_size, dim=0):
r"""See :func:`torch.split`"""
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.split, (self,), self, split_size, dim=dim
)
if isinstance(split_size, Tensor):
try:
split_size = int(split_size)
except ValueError:
pass
if isinstance(split_size, (int, torch.SymInt)):
return torch._VF.split(self, split_size, dim) # type: ignore[attr-defined]
else:
return torch._VF.split_with_sizes(
self,
# pyrefly: ignore [bad-argument-type]
split_size,
dim,
)
def unique(self, sorted=True, return_inverse=False, return_counts=False, dim=None):
r"""Returns the unique elements of the input tensor.
See :func:`torch.unique`
"""
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.unique,
(self,),
self,
sorted=sorted,
return_inverse=return_inverse,
return_counts=return_counts,
dim=dim,
)
return torch.unique(
self,
sorted=sorted,
return_inverse=return_inverse,
return_counts=return_counts,
dim=dim,
)
def unique_consecutive(self, return_inverse=False, return_counts=False, dim=None):
r"""Eliminates all but the first element from every consecutive group of equivalent elements.
See :func:`torch.unique_consecutive`
"""
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.unique_consecutive,
(self,),
self,
return_inverse=return_inverse,
return_counts=return_counts,
dim=dim,
)
return torch.unique_consecutive(
self, return_inverse=return_inverse, return_counts=return_counts, dim=dim
)
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rsub__(self, other: Union["Tensor", int, float, bool, complex]) -> "Tensor":
return _C._VariableFunctions.rsub(self, other)
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rdiv__(self, other: Union["Tensor", int, float, bool, complex]) -> "Tensor":
return self.reciprocal() * other
__rtruediv__ = __rdiv__
__itruediv__ = _C.TensorBase.__idiv__
# pyrefly: ignore [bad-override]
__pow__ = cast(
Callable[
["torch._C.TensorBase", Union["Tensor", int, float, bool, complex]],
"Tensor",
],
_handle_torch_function_and_wrap_type_error_to_not_implemented(
_C.TensorBase.pow
),
)
__ipow__ = _handle_torch_function_and_wrap_type_error_to_not_implemented(
_C.TensorBase.pow_
)
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rmod__(self, other: Union["Tensor", int, float, bool, complex]) -> "Tensor":
return torch.remainder(other, self)
def __format__(self, format_spec):
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__format__, (self,), self, format_spec)
if self.dim() == 0 and not self.is_meta and type(self) is Tensor:
# Use detach() here to avoid the warning when converting a scalar Tensor that
# requires gradients to a python number. It is ok for formatting.
return self.detach().item().__format__(format_spec)
return object.__format__(self, format_spec)
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rpow__(self, other: Union["Tensor", int, float, bool, complex]) -> "Tensor":
return torch.pow(other, self)
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __floordiv__(self, other: Union["Tensor", int, float, bool]) -> "Tensor": # type: ignore[override]
# TODO(rec): the superclass says it accepts complex here,
# but torch.floor_divide says it doesn't.
return torch.floor_divide(self, other)
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rfloordiv__(self, other: Union["Tensor", int, float, bool]) -> "Tensor": # type: ignore[override]
return torch.floor_divide(other, self)
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rlshift__(
self, other: Union["Tensor", int, float, bool, complex]
) -> "Tensor":
return torch.bitwise_left_shift(other, self)
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rrshift__(
self, other: Union["Tensor", int, float, bool, complex]
) -> "Tensor":
return torch.bitwise_right_shift(other, self)
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rmatmul__(self, other: "Tensor") -> "Tensor":
return torch.matmul(other, self)
__pos__ = _C.TensorBase.positive
__neg__ = _C.TensorBase.neg
__abs__ = _C.TensorBase.abs
def __len__(self):
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__len__, (self,), self)
if self.dim() == 0:
raise TypeError("len() of a 0-d tensor")
if torch._C._get_tracing_state():
warnings.warn(
"Using len to get tensor shape might cause the trace to be incorrect. "
"Recommended usage would be tensor.shape[0]. "
"Passing a tensor of different shape might lead to errors or silently give "
"incorrect results.",
category=torch.jit.TracerWarning,
stacklevel=2,
)
return self.shape[0]
def __iter__(self):
# NB: we use 'imap' and not 'map' here, so that in Python 2 we get a
# generator and don't eagerly perform all the indexes. This could
# save us work, and also helps keep trace ordering deterministic
# (e.g., if you zip(*hiddens), the eager map will force all the
# indexes of hiddens[0] before hiddens[1], while the generator
# map will interleave them.)
# NB: We have intentionally skipped __torch_function__ dispatch here.
# See gh-54457
if self.dim() == 0:
raise TypeError("iteration over a 0-d tensor")
if torch._C._get_tracing_state():
warnings.warn(
"Iterating over a tensor might cause the trace to be incorrect. "
"Passing a tensor of different shape won't change the number of "
"iterations executed (and might lead to errors or silently give "
"incorrect results).",
category=torch.jit.TracerWarning,
stacklevel=2,
)
return iter(self.unbind(0))
def __hash__(self):
        # Do NOT handle __torch_function__ here, as a user's default
        # implementation that handles most functions will most likely do it wrong.
# It can be easily overridden by defining this method on the user
# subclass if needed.
return id(self)
def __dir__(self):
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__dir__, (self,), self)
tensor_methods = dir(self.__class__)
tensor_methods.remove("volatile") # deprecated
attrs = list(self.__dict__.keys())
keys = tensor_methods + attrs
# property only available dense, cuda tensors
if (not self.is_cuda) or self.is_sparse:
keys.remove("__cuda_array_interface__")
return sorted(keys)
# Numpy array interface, to support `numpy.asarray(tensor) -> ndarray`
__array_priority__ = 1000 # prefer Tensor ops over numpy ones
def __array__(self, dtype=None):
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__array__, (self,), self, dtype=dtype)
if dtype is None:
return self.numpy()
else:
return self.numpy().astype(dtype, copy=False)
# Wrap Numpy array again in a suitable tensor when done, to support e.g.
# `numpy.sin(tensor) -> tensor` or `numpy.greater(tensor, 0) -> ByteTensor`
def __array_wrap__(self, array):
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.__array_wrap__, (self,), self, array=array
)
if array.dtype == bool:
# Workaround, torch has no built-in bool tensor
array = array.astype("uint8")
return torch.from_numpy(array)
def __contains__(self, element: Any, /) -> bool:
r"""Check if `element` is present in tensor
Args:
element (Tensor or scalar): element to be checked
                for presence in the current tensor
"""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__contains__, (self,), self, element)
if isinstance(
element, (torch.Tensor, Number, torch.SymInt, torch.SymFloat, torch.SymBool)
):
# type hint doesn't understand the __contains__ result array
return bool((element == self).any().item()) # type: ignore[union-attr]
raise RuntimeError(
f"Tensor.__contains__ only supports Tensor or scalar, but you passed in a {type(element)}."
)
@property
def __cuda_array_interface__(self):
"""Array view description for cuda tensors.
See:
https://numba.pydata.org/numba-doc/dev/cuda/cuda_array_interface.html
"""
if has_torch_function_unary(self):
# TODO mypy doesn't support @property, see: https://github.com/python/mypy/issues/6185
return handle_torch_function(
Tensor.__cuda_array_interface__.__get__, # type: ignore[attr-defined]
(self,),
self,
)
# raise AttributeError for unsupported tensors, so that
# hasattr(cpu_tensor, "__cuda_array_interface__") is False.
if not self.is_cuda:
raise AttributeError(
f"Can't get __cuda_array_interface__ on non-CUDA tensor type: {self.type()} "
"If CUDA data is required use tensor.cuda() to copy tensor to device memory."
)
if self.is_sparse:
raise AttributeError(
f"Can't get __cuda_array_interface__ on sparse type: {self.type()} "
"Use Tensor.to_dense() to convert to a dense tensor first."
)
# RuntimeError, matching tensor.__array__() behavior.
if self.requires_grad:
raise RuntimeError(
"Can't get __cuda_array_interface__ on Variable that requires grad. "
"If gradients aren't required, use var.detach() to get Variable that doesn't require grad."
)
typestr = _dtype_to_typestr(self.dtype)
itemsize = self.element_size()
shape = tuple(self.shape)
if self.is_contiguous():
# __cuda_array_interface__ v2 requires the strides to be omitted
# (either not set or set to None) for C-contiguous arrays.
strides = None
else:
strides = tuple(s * itemsize for s in self.stride())
data_ptr = self.data_ptr() if self.numel() > 0 else 0
data = (data_ptr, False) # read-only is false
return dict(typestr=typestr, shape=shape, strides=strides, data=data, version=2)
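    # Consumers such as Numba read this dict for zero-copy interop, e.g.
    # numba.cuda.as_cuda_array(cuda_tensor) builds a device array view over
    # the same memory (illustrative; requires a CUDA tensor).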
def storage_type(self):
r"""storage_type() -> type
Returns the type of the underlying storage.
"""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.storage_type, (self,), self)
torch.storage._warn_typed_storage_removal()
return self._typed_storage()._get_legacy_storage_class()
def refine_names(self, *names): # pyrefly: ignore # bad-override
r"""Refines the dimension names of :attr:`self` according to :attr:`names`.
Refining is a special case of renaming that "lifts" unnamed dimensions.
A ``None`` dim can be refined to have any name; a named dim can only be
refined to have the same name.
Because named tensors can coexist with unnamed tensors, refining names
gives a nice way to write named-tensor-aware code that works with both
named and unnamed tensors.
:attr:`names` may contain up to one Ellipsis (``...``).
The Ellipsis is expanded greedily; it is expanded in-place to fill
:attr:`names` to the same length as ``self.dim()`` using names from the
corresponding indices of ``self.names``.
Python 2 does not support Ellipsis but one may use a string literal
instead (``'...'``).
Args:
names (iterable of str): The desired names of the output tensor. May
contain up to one Ellipsis.
Examples::
>>> imgs = torch.randn(32, 3, 128, 128)
>>> named_imgs = imgs.refine_names('N', 'C', 'H', 'W')
>>> named_imgs.names
('N', 'C', 'H', 'W')
>>> tensor = torch.randn(2, 3, 5, 7, 11)
>>> tensor = tensor.refine_names('A', ..., 'B', 'C')
>>> tensor.names
('A', None, None, 'B', 'C')
.. warning::
The named tensor API is experimental and subject to change.
"""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.refine_names, (self,), self, *names)
names = resolve_ellipsis(names, self.names, "refine_names")
return super().refine_names(names)
def align_to(self, *names): # pyrefly: ignore # bad-override
r"""Permutes the dimensions of the :attr:`self` tensor to match the order
specified in :attr:`names`, adding size-one dims for any new names.
All of the dims of :attr:`self` must be named in order to use this method.
The resulting tensor is a view on the original tensor.
All dimension names of :attr:`self` must be present in :attr:`names`.
:attr:`names` may contain additional names that are not in ``self.names``;
the output tensor has a size-one dimension for each of those new names.
:attr:`names` may contain up to one Ellipsis (``...``).
The Ellipsis is expanded to be equal to all dimension names of :attr:`self`
that are not mentioned in :attr:`names`, in the order that they appear
in :attr:`self`.
Python 2 does not support Ellipsis but one may use a string literal
instead (``'...'``).
Args:
names (iterable of str): The desired dimension ordering of the
output tensor. May contain up to one Ellipsis that is expanded
to all unmentioned dim names of :attr:`self`.
Examples::
>>> tensor = torch.randn(2, 2, 2, 2, 2, 2)
>>> named_tensor = tensor.refine_names('A', 'B', 'C', 'D', 'E', 'F')
# Move the F and E dims to the front while keeping the rest in order
>>> named_tensor.align_to('F', 'E', ...)
.. warning::
The named tensor API is experimental and subject to change.
"""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.align_to, (self,), self, *names)
ellipsis_idx = single_ellipsis_index(names, "align_to")
if ellipsis_idx is None:
return super().align_to(names)
return super().align_to(
[name for name in names if not is_ellipsis(name)], ellipsis_idx
)
def unflatten(self, dim, sizes): # type: ignore[override]
r"""
unflatten(dim, sizes) -> Tensor
See :func:`torch.unflatten`.
"""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.unflatten, (self,), self, dim, sizes)
if not sizes:
raise RuntimeError("unflatten: sizes must be non-empty")
names = None
if isinstance(sizes, OrderedDict) or (
isinstance(sizes, (tuple, list)) and isinstance(sizes[0], (tuple, list))
):
names, sizes = unzip_namedshape(sizes)
return super().unflatten(dim, sizes, names)
else:
return super().unflatten(dim, sizes)
def rename_(self, *names, **rename_map):
"""In-place version of :meth:`~Tensor.rename`."""
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.rename_, (self,), self, *names, **rename_map
)
# Note [rename_ / rename API]
# The Python API for these is different from the C++ API. In Python:
# 1) tensor.rename(*names) takes a vararglist of names
# 2) tensor.rename(**rename_map) takes a map of names to rename.
# C++ is static, making it difficult to implement similar behavior.
return update_names(self, names, rename_map, inplace=True)
def rename(self, *names, **rename_map):
"""Renames dimension names of :attr:`self`.
There are two main usages:
``self.rename(**rename_map)`` returns a view on tensor that has dims
renamed as specified in the mapping :attr:`rename_map`.
``self.rename(*names)`` returns a view on tensor, renaming all
dimensions positionally using :attr:`names`.
Use ``self.rename(None)`` to drop names on a tensor.
One cannot specify both positional args :attr:`names` and keyword args
:attr:`rename_map`.
Examples::
>>> imgs = torch.rand(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
>>> renamed_imgs = imgs.rename(N='batch', C='channels')
>>> renamed_imgs.names
('batch', 'channels', 'H', 'W')
>>> renamed_imgs = imgs.rename(None)
>>> renamed_imgs.names
(None, None, None, None)
>>> renamed_imgs = imgs.rename('batch', 'channel', 'height', 'width')
>>> renamed_imgs.names
('batch', 'channel', 'height', 'width')
.. warning::
The named tensor API is experimental and subject to change.
"""
if has_torch_function_unary(self):
return handle_torch_function(
Tensor.rename, (self,), self, *names, **rename_map
)
# See Note [rename_ / rename API]
return update_names(self, names, rename_map, inplace=False)
def to_sparse_coo(self):
"""Convert a tensor to :ref:`coordinate format <sparse-coo-docs>`.
Examples::
>>> dense = torch.randn(5, 5)
>>> sparse = dense.to_sparse_coo()
>>> sparse._nnz()
25
"""
return self.to_sparse()
def dim_order(
self, *, ambiguity_check: Union[bool, list[torch.memory_format]] = False
):
"""
dim_order(ambiguity_check=False) -> tuple
Returns the uniquely determined tuple of int describing the dim order or
physical layout of :attr:`self`.
The dim order represents how dimensions are laid out in memory of dense tensors,
starting from the outermost to the innermost dimension.
Note that the dim order may not always be uniquely determined.
        If `ambiguity_check` is True, this function raises a RuntimeError when the dim order cannot be uniquely determined;
        If `ambiguity_check` is a list of memory formats, this function raises a RuntimeError when the tensor cannot be
        interpreted as exactly one of the given memory formats, or when the dim order cannot be uniquely determined.
        If `ambiguity_check` is False, it returns one of the legal dim orders without checking uniqueness.
Otherwise, it will raise TypeError.
Args:
ambiguity_check (bool or List[torch.memory_format]): The check method for ambiguity of dim order.
Examples::
>>> torch.empty((2, 3, 5, 7)).dim_order()
(0, 1, 2, 3)
>>> torch.empty((2, 3, 5, 7)).transpose(1, 2).dim_order()
(0, 2, 1, 3)
>>> torch.empty((2, 3, 5, 7), memory_format=torch.channels_last).dim_order()
(0, 2, 3, 1)
>>> torch.empty((1, 2, 3, 4)).dim_order()
(0, 1, 2, 3)
>>> try:
... torch.empty((1, 2, 3, 4)).dim_order(ambiguity_check=True)
... except RuntimeError as e:
... print(e)
The tensor does not have unique dim order, or cannot map to exact one of the given memory formats.
>>> torch.empty((1, 2, 3, 4)).dim_order(
... ambiguity_check=[torch.contiguous_format, torch.channels_last]
... ) # It can be mapped to contiguous format
(0, 1, 2, 3)
>>> try:
... torch.empty((1, 2, 3, 4)).dim_order(ambiguity_check="ILLEGAL") # type: ignore[arg-type]
... except TypeError as e:
... print(e)
The ambiguity_check argument must be a bool or a list of memory formats.
.. warning::
The dim_order tensor API is experimental and subject to change.
"""
if has_torch_function_unary(self):
return handle_torch_function(Tensor.dim_order, (self,), self)
if self.is_sparse:
raise AttributeError(
f"Can't get dim order on sparse type: {self.type()} "
"Use Tensor.to_dense() to convert to a dense tensor first."
)
# Sanity check ambiguity_check data types
if not isinstance(ambiguity_check, bool):
if not isinstance(ambiguity_check, list):
raise TypeError(
"The ambiguity_check argument must be a bool or a list of memory formats."
)
for memory_format in ambiguity_check:
if not isinstance(memory_format, torch.memory_format):
raise TypeError(
"The ambiguity_check argument must be a bool or a list of memory formats."
)
def invalid_unique_memory_format(tensor, valid_memory_formats):
"""
            Returns True if the tensor does not match exactly one of the given memory formats (zero or several matches), False otherwise.
"""
n_legality = 0
for memory_format in valid_memory_formats:
if tensor.is_contiguous(memory_format=memory_format):
n_legality += 1
return n_legality != 1
def has_multiple_dim_order(tensor):
"""
            Returns True if there are multiple legal dim orders for the given tensor, False otherwise.
            The tensor is considered to have multiple legal dim orders if either of the following conditions is met:
            * Singleton Dimensions: There is at least one singleton dimension in the tensor.
Since their size is 1, they don't affect the memory offset (stride * index
is zero because index is always zero). Therefore, they can be placed anywhere
in the dimension order without changing how data is accessed.
* Same strides: Strides reflect how the tensor is stored in memory.
If any two dimensions have the same stride, swapping these dimensions won't
change how data is accessed, leading to multiple correct dimension orders.
"""
from torch.fx.experimental.symbolic_shapes import guard_or_false
sizes = tensor.size()
strides = tensor.stride()
# Check if there are any duplicate strides
has_duplicate_strides = any(
guard_or_false(earlier == later)
for earlier, later in itertools.pairwise(strides)
)
# Check if there are any singleton dimensions
has_singleton_dims = any(guard_or_false(size == 1) for size in sizes)
return has_duplicate_strides or has_singleton_dims
valid_memory_formats = (
ambiguity_check if isinstance(ambiguity_check, list) else []
)
check_multiple_dim_order = (
ambiguity_check if isinstance(ambiguity_check, bool) else True
)
if (
check_multiple_dim_order and has_multiple_dim_order(self)
) and invalid_unique_memory_format(self, valid_memory_formats):
raise RuntimeError(
"The tensor does not have unique dim order, or cannot map to exact one of the given memory formats."
)
import torch._prims_common as utils
out_perm, raise_ambiguity = (
utils.compute_elementwise_output_logical_to_physical_perm(
self, ambiguity_check=ambiguity_check
)
)
if raise_ambiguity:
raise RuntimeError("The tensor does not have unique dim order.")
return tuple(out_perm)
def _update_names(self, names, inplace):
if has_torch_function_unary(self):
return handle_torch_function(
Tensor._update_names, (self,), self, names, inplace
)
# See Note [rename_ / rename API]
if inplace:
return super().rename_(names)
else:
return super().rename(names)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
"""
This __torch_function__ implementation wraps subclasses such that
methods called on subclasses return a subclass instance instead of
a ``torch.Tensor`` instance.
One corollary to this is that you need coverage for torch.Tensor
methods if implementing __torch_function__ for subclasses.
We recommend always calling ``super().__torch_function__`` as the base
case when doing the above.
While not mandatory, we recommend making `__torch_function__` a classmethod.
"""
if kwargs is None:
kwargs = {}
if not all(issubclass(cls, t) for t in types):
return NotImplemented
with _C.DisableTorchFunctionSubclass():
ret = func(*args, **kwargs)
if func in get_default_nowrap_functions():
return ret
else:
return _convert(ret, cls)
__torch_dispatch__ = _C._disabled_torch_dispatch_impl
def __dlpack__(
self,
*,
stream: Optional[Any] = -1,
max_version: Optional[tuple[int, int]] = None,
dl_device: Optional[tuple[enum.IntEnum, int]] = None,
copy: Optional[bool] = None,
):
"""
        Creates a DLPack `capsule <https://data-apis.org/array-api/latest/design_topics/data_interchange.html#data-interchange>`_
of the current tensor to be exported to other libraries.
This function will be called from the `from_dlpack` method
of the library that will consume the capsule. `from_dlpack` passes the current
stream to this method as part of the specification.
Args:
stream (integer or None): An optional Python integer representing a
pointer to a CUDA stream. The current stream is synchronized with
this stream before the capsule is created, and since the capsule
shares its storage with the tensor this make it safe to access from
both streams. If -1 is passed then no synchronization is performed.
If 1 (on CUDA) or 0 (on ROCM) then the default stream is used for
synchronization. This API intentionally slightly deviates from the DLPack
guidance: the default stream is -1 (stream-preserving; no cross-stream sync),
because many from_dlpack implementations intend stream preservation.
For non-CUDA devices, -1 is treated the same as None.
max_version (tuple[int, int] or None): An optional Python tuple with
2 integers, representing the maximum version the caller supports. If
None (default), PyTorch will fallback to DLPack 0.8.
dl_device (tuple[DLDeviceType, int] or None): An optional tuple specifying
in which device the exported DLPack capsule should be on. If None (default),
the exported DLPack capsule will be on the same device as ``self``.
copy (bool or None): An optional boolean indicating whether or not to copy
``self``. If None, PyTorch will copy only if necessary.
"""
if has_torch_function_unary(self):
args = (self,)
kwargs = {
"stream": stream,
"max_version": max_version,
"dl_device": dl_device,
"copy": copy,
}
return handle_torch_function(Tensor.__dlpack__, (self,), *args, **kwargs)
# DLPack capsules can't capture all of PyTorch's semantics,
# so we prohibit exporting tensors that would lose their properties like
# requires_grad and having the conjugate bit set.
if self.requires_grad:
raise BufferError(
"Can't export tensors that require gradient, use tensor.detach()"
)
if self.is_conj():
raise BufferError("Can't export tensors with the conjugate bit set")
if self.layout != torch.strided:
raise BufferError(
"Can't export tensors with layout other than torch.strided"
)
if (
self.device.type == "cuda"
and self.device.index != torch.cuda.current_device()
):
raise BufferError(
"Can't export tensors on a different CUDA device index. "
f"Expected: {self.device.index}. "
f"Current device: {torch.cuda.current_device()}."
)
if stream is not None and type(stream) is not int:
# Stream pointers in CUDA/ROCm are uniquely numbered and can
# be retrieved from their integer value.
raise TypeError("stream must be ``int`` or ``none``")
elif self.device.type == "cuda" and stream != -1:
# NB: This logic handles the special case values for default
# streams and must be kept in sync with from_dlpack in
# torch/utils/dlpack.py
is_rocm = torch.version.hip is not None
is_cuda = not is_rocm
if stream is None or (is_rocm and stream == 0) or (is_cuda and stream == 1):
stream = torch.cuda.default_stream()
else:
if is_cuda and stream == 2:
raise BufferError("per-thread default stream is not supported.")
device_str = "CUDA" if is_cuda else "ROCm"
assert (is_cuda and stream != 0) or (
is_rocm and stream not in (1, 2)
), f"unsupported stream on {device_str}: {stream}."
stream = torch.cuda.ExternalStream(stream)
# Only synchronize on different streams
current_stream = torch.cuda.current_stream()
if stream != current_stream:
event = torch.cuda.Event()
event.record(current_stream)
stream.wait_event(event)
elif self.device.type == "cpu":
            assert stream is None or stream == -1, "stream should be None or -1 on cpu."
if self.device.type == "xla":
import torch_xla
import torch_xla.utils.dlpack as xla_dlpack
if (
len(torch_xla.real_devices()) <= 0
or "cuda" not in torch_xla.real_devices()[0].lower()
):
raise RuntimeError(
"Can't export to dlpack an XLA tensor that is not on CUDA."
)
# Does not support DLPack 1.0, yet.
return xla_dlpack.to_dlpack(self)
if max_version is None or max_version[0] < 1:
# Fallback to the old, unversioned variant.
return _C._to_dlpack(self, dl_device=dl_device, copy=copy)
return _C._to_dlpack_versioned(self, dl_device=dl_device, copy=copy)
def __dlpack_device__(self) -> tuple[enum.IntEnum, int]:
if has_torch_function_unary(self):
return handle_torch_function(Tensor.__dlpack_device__, (self,), self)
from torch.utils.dlpack import DLDeviceType
device = self.device
idx = device.index if device.index is not None else 0
torch_device_type = device.type
if torch_device_type == "cuda" and torch.version.hip is not None:
device_type = DLDeviceType.kDLROCM
elif torch_device_type == "cpu" and self.is_pinned():
device_type = DLDeviceType.kDLCUDAHost
elif torch_device_type == "cuda":
device_type = DLDeviceType.kDLCUDA
elif torch_device_type == "cpu":
device_type = DLDeviceType.kDLCPU
elif torch_device_type == "xpu":
device_type = DLDeviceType.kDLOneAPI
elif self.device.type == "privateuse1":
device_type = DLDeviceType.kDLExtDev
elif torch_device_type == "xla":
import torch_xla
if (
len(torch_xla.real_devices()) <= 0
or "cuda" not in torch_xla.real_devices()[0].lower()
):
raise ValueError(f"Unknown device type {torch_device_type} for Dlpack")
device_type = DLDeviceType.kDLCUDA
elif torch_device_type == "mps":
device_type = DLDeviceType.kDLMetal
else:
raise ValueError(f"Unknown device type {torch_device_type} for Dlpack")
return (device_type, idx)
__module__ = "torch"
def _convert(ret, cls):
if cls is Tensor:
return ret
if isinstance(ret, Tensor) and not isinstance(ret, cls):
ret = ret.as_subclass(cls)
if isinstance(ret, (tuple, list)):
# Also handles things like namedtuples
ret = type(ret)(_convert(r, cls) for r in ret)
return ret
| Tensor |
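A minimal usage sketch for the named-tensor methods documented in the listing above (refine_names, align_to, rename); it assumes only a standard PyTorch install, and the printed values follow the docstring examples:

    import torch

    imgs = torch.randn(32, 3, 128, 128)
    named = imgs.refine_names('N', 'C', 'H', 'W')   # lift unnamed dims to names
    print(named.names)                              # ('N', 'C', 'H', 'W')

    # align_to permutes dims by name; '...' keeps the remaining order.
    moved = named.align_to('W', 'H', ...)
    print(moved.shape)                              # torch.Size([128, 128, 32, 3])

    # rename returns a view with new names; rename(None) drops them.
    renamed = named.rename(N='batch', C='channels')
    print(renamed.names)                            # ('batch', 'channels', 'H', 'W')
    print(renamed.rename(None).names)               # (None, None, None, None)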
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 191974,
"end": 194504
} | class ____:
def test_pmf(self):
# comparison to R
k = np.arange(-10, 15)
mu1, mu2 = 10, 5
skpmfR = np.array(
[4.2254582961926893e-005, 1.1404838449648488e-004,
2.8979625801752660e-004, 6.9177078182101231e-004,
1.5480716105844708e-003, 3.2412274963433889e-003,
6.3373707175123292e-003, 1.1552351566696643e-002,
1.9606152375042644e-002, 3.0947164083410337e-002,
4.5401737566767360e-002, 6.1894328166820688e-002,
7.8424609500170578e-002, 9.2418812533573133e-002,
1.0139793148019728e-001, 1.0371927988298846e-001,
9.9076583077406091e-002, 8.8546660073089561e-002,
7.4187842052486810e-002, 5.8392772862200251e-002,
4.3268692953013159e-002, 3.0248159818374226e-002,
1.9991434305603021e-002, 1.2516877303301180e-002,
7.4389876226229707e-003])
assert_almost_equal(stats.skellam.pmf(k, mu1, mu2), skpmfR, decimal=15)
def test_cdf(self):
# comparison to R, only 5 decimals
k = np.arange(-10, 15)
mu1, mu2 = 10, 5
skcdfR = np.array(
[6.4061475386192104e-005, 1.7810985988267694e-004,
4.6790611790020336e-004, 1.1596768997212152e-003,
2.7077485103056847e-003, 5.9489760066490718e-003,
1.2286346724161398e-002, 2.3838698290858034e-002,
4.3444850665900668e-002, 7.4392014749310995e-002,
1.1979375231607835e-001, 1.8168808048289900e-001,
2.6011268998306952e-001, 3.5253150251664261e-001,
4.5392943399683988e-001, 5.5764871387982828e-001,
6.5672529695723436e-001, 7.4527195703032389e-001,
8.1945979908281064e-001, 8.7785257194501087e-001,
9.2112126489802404e-001, 9.5136942471639818e-001,
9.7136085902200120e-001, 9.8387773632530240e-001,
9.9131672394792536e-001])
assert_almost_equal(stats.skellam.cdf(k, mu1, mu2), skcdfR, decimal=5)
def test_extreme_mu2(self):
# check that crash reported by gh-17916 large mu2 is resolved
x, mu1, mu2 = 0, 1, 4820232647677555.0
assert_allclose(stats.skellam.pmf(x, mu1, mu2), 0, atol=1e-16)
assert_allclose(stats.skellam.cdf(x, mu1, mu2), 1, atol=1e-16)
| TestSkellam |
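As a sanity-check sketch of what the R-derived fixtures above encode: the Skellam distribution is the difference of two independent Poisson variates, so its pmf can be cross-checked by simulation. This assumes SciPy and NumPy; the sample size and tolerance are illustrative:

    import numpy as np
    from scipy import stats

    mu1, mu2 = 10, 5
    rng = np.random.default_rng(0)
    n = 200_000
    diff = rng.poisson(mu1, n) - rng.poisson(mu2, n)   # Skellam(mu1, mu2) samples

    for k in (0, 5, 10):
        empirical = np.mean(diff == k)
        exact = stats.skellam.pmf(k, mu1, mu2)
        assert abs(empirical - exact) < 5e-3           # loose Monte Carlo tolerance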
python | ray-project__ray | python/ray/serve/tests/test_task_processor.py | {
"start": 484,
"end": 4958
} | class ____:
def __init__(self):
self.processed_tasks = set()
def add_task(self, task_data):
self.processed_tasks.add(task_data)
def get_processed_tasks(self):
return self.processed_tasks
def get_count(self):
return len(self.processed_tasks)
@ray.remote
def send_request_to_queue(
processor_config: TaskProcessorConfig, data, task_name="process_request"
):
adapter_instance_global = instantiate_adapter_from_config(
task_processor_config=processor_config
)
result = adapter_instance_global.enqueue_task_sync(task_name, args=[data])
assert result.id is not None
return result.id
@pytest.fixture(scope="function")
def temp_queue_directory():
"""Creates a temporary directory with 'queue', 'results', and 'control' subdirectories for task consumer tests."""
with tempfile.TemporaryDirectory() as tmpdir:
tmpdir_path = Path(tmpdir)
data_folder_queue = tmpdir_path / "queue"
data_folder_queue.mkdir()
results_path = tmpdir_path / "results"
results_path.mkdir()
control_path = tmpdir_path / "control"
control_path.mkdir()
yield {
"queue_path": data_folder_queue,
"results_path": results_path,
"control_path": control_path,
}
@pytest.fixture(scope="function")
def transport_options(temp_queue_directory):
"""Create standard transport options for filesystem broker."""
queue_path = temp_queue_directory["queue_path"]
control_path = temp_queue_directory["control_path"]
return {
# Incoming message queue - where new task messages are written when sent to broker
"data_folder_in": str(queue_path),
# Outgoing message storage - where task results and responses are written after completion
"data_folder_out": str(queue_path),
# Processed message archive - where messages are moved after successful processing
"data_folder_processed": str(queue_path),
# Control message storage - where Celery management and control commands are stored
"control_folder": str(control_path),
}
@pytest.fixture(scope="function")
def create_processor_config(temp_queue_directory, transport_options):
"""Create a TaskProcessorConfig with common defaults."""
def _create(
failed_task_queue_name=None, unprocessable_task_queue_name=None, **kwargs
):
results_path = temp_queue_directory["results_path"]
config_params = {
"queue_name": "my_default_app_queue",
"adapter_config": CeleryAdapterConfig(
broker_url="filesystem://",
backend_url=f"file://{results_path}",
broker_transport_options=transport_options,
),
}
# Add dead letter queue names if provided
if failed_task_queue_name is not None:
config_params["failed_task_queue_name"] = failed_task_queue_name
if unprocessable_task_queue_name is not None:
config_params[
"unprocessable_task_queue_name"
] = unprocessable_task_queue_name
config_params.update(kwargs)
return TaskProcessorConfig(**config_params)
return _create
def _get_task_counts_by_routing_key(queue_path):
"""Counts tasks in a queue directory by reading the routing key from each message."""
counts = defaultdict(int)
if not queue_path.exists():
return counts
    # Celery doesn't provide a way to get the queue size,
    # so we have to leverage the broker's API instead.
    # Since we are using the filesystem broker in tests, we can read the files in the queue directory to get the queue size.
for msg_file in queue_path.iterdir():
if msg_file.is_file():
try:
with open(msg_file, "r") as f:
data = json.load(f)
routing_key = (
data.get("properties", {})
.get("delivery_info", {})
.get("routing_key")
)
if routing_key:
counts[routing_key] += 1
except (json.JSONDecodeError, IOError):
# Ignore files that aren't valid JSON or are otherwise unreadable
continue
return counts
@pytest.mark.skipif(sys.platform == "win32", reason="Flaky on Windows.")
| ProcessedTasksTracker |
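A small sketch of the message layout that _get_task_counts_by_routing_key in the listing above expects: each filesystem-broker message is a JSON file whose routing key sits under properties.delivery_info. The file name is made up for illustration, and the helper itself is assumed to be in scope from the listing:

    import json
    from pathlib import Path
    from tempfile import TemporaryDirectory

    with TemporaryDirectory() as tmp:
        queue_path = Path(tmp)
        message = {"properties": {"delivery_info": {"routing_key": "my_default_app_queue"}}}
        (queue_path / "msg-0001").write_text(json.dumps(message))

        counts = _get_task_counts_by_routing_key(queue_path)
        assert counts["my_default_app_queue"] == 1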
python | euske__pdfminer | pdfminer/image.py | {
"start": 1729,
"end": 4086
} | class ____:
def __init__(self, outdir):
self.outdir = outdir
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
return
def export_image(self, image):
stream = image.stream
filters = stream.get_filters()
(width, height) = image.srcsize
if len(filters) == 1 and filters[0][0] in LITERALS_DCT_DECODE:
ext = '.jpg'
elif (image.bits == 1 or
image.bits == 8 and image.colorspace in (LITERAL_DEVICE_RGB, LITERAL_DEVICE_GRAY)):
ext = '.%dx%d.bmp' % (width, height)
else:
ext = '.%d.%dx%d.img' % (image.bits, width, height)
name = image.name+ext
path = os.path.join(self.outdir, name)
with open(path, 'wb') as fp:
if ext == '.jpg':
raw_data = stream.get_rawdata()
if LITERAL_DEVICE_CMYK in image.colorspace:
from PIL import Image
from PIL import ImageChops
ifp = BytesIO(raw_data)
i = Image.open(ifp)
i = ImageChops.invert(i)
i = i.convert('RGB')
i.save(fp, 'JPEG')
else:
fp.write(raw_data)
elif image.bits == 1:
bmp = BMPWriter(fp, 1, width, height)
data = stream.get_data()
i = 0
width = (width+7)//8
for y in range(height):
bmp.write_line(y, data[i:i+width])
i += width
elif image.bits == 8 and image.colorspace is LITERAL_DEVICE_RGB:
bmp = BMPWriter(fp, 24, width, height)
data = stream.get_data()
i = 0
width = width*3
for y in range(height):
bmp.write_line(y, data[i:i+width])
i += width
elif image.bits == 8 and image.colorspace is LITERAL_DEVICE_GRAY:
bmp = BMPWriter(fp, 8, width, height)
data = stream.get_data()
i = 0
for y in range(height):
bmp.write_line(y, data[i:i+width])
i += width
else:
fp.write(stream.get_data())
return name
| ImageWriter |
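A hedged sketch of the BMPWriter protocol the exporter above relies on: one call per scanline, with row data already packed to the declared bit depth. It assumes pdfminer is installed and that BMPWriter lives alongside ImageWriter; the width of 64 is chosen so an 8-bit row needs no padding, and the gradient content is illustrative:

    from pdfminer.image import BMPWriter

    width, height = 64, 64
    with open("gradient.bmp", "wb") as fp:
        bmp = BMPWriter(fp, 8, width, height)      # 8-bit grayscale palette BMP
        for y in range(height):
            row = bytes([(y * 4) % 256] * width)   # one gray value per scanline
            bmp.write_line(y, row)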
python | pytorch__pytorch | test/torch_np/test_reductions.py | {
"start": 2364,
"end": 5237
} | class ____(TestCase):
def test_mean(self):
A = [[1, 2, 3], [4, 5, 6]]
assert np.mean(A) == 3.5
assert np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5]))
assert np.all(np.mean(A, 1) == np.array([2.0, 5.0]))
# XXX: numpy emits a warning on empty slice
assert np.isnan(np.mean([]))
m = np.asarray(A)
assert np.mean(A) == m.mean()
def test_mean_values(self):
# rmat = np.random.random((4, 5))
rmat = np.arange(20, dtype=float).reshape((4, 5))
cmat = rmat + 1j * rmat
import warnings
with warnings.catch_warnings():
warnings.simplefilter("error")
for mat in [rmat, cmat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = np.mean(mat, axis=axis) * mat.shape[axis]
assert_allclose(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = np.mean(mat, axis=axis) * mat.size
assert_allclose(res, tgt)
def test_mean_float16(self):
# This fail if the sum inside mean is done in float16 instead
# of float32.
assert np.mean(np.ones(100000, dtype="float16")) == 1
@xpassIfTorchDynamo_np # (reason="XXX: mean(..., where=...) not implemented")
def test_mean_where(self):
a = np.arange(16).reshape((4, 4))
wh_full = np.array(
[
[False, True, False, True],
[True, False, True, False],
[True, True, False, False],
[False, False, True, True],
]
)
wh_partial = np.array([[False], [True], [True], [False]])
_cases = [
(1, True, [1.5, 5.5, 9.5, 13.5]),
(0, wh_full, [6.0, 5.0, 10.0, 9.0]),
(1, wh_full, [2.0, 5.0, 8.5, 14.5]),
(0, wh_partial, [6.0, 7.0, 8.0, 9.0]),
]
for _ax, _wh, _res in _cases:
assert_allclose(a.mean(axis=_ax, where=_wh), np.array(_res))
assert_allclose(np.mean(a, axis=_ax, where=_wh), np.array(_res))
a3d = np.arange(16).reshape((2, 2, 4))
_wh_partial = np.array([False, True, True, False])
_res = [[1.5, 5.5], [9.5, 13.5]]
assert_allclose(a3d.mean(axis=2, where=_wh_partial), np.array(_res))
assert_allclose(np.mean(a3d, axis=2, where=_wh_partial), np.array(_res))
with pytest.warns(RuntimeWarning):
assert_allclose(
a.mean(axis=1, where=wh_partial), np.array([np.nan, 5.5, 9.5, np.nan])
)
with pytest.warns(RuntimeWarning):
assert_equal(a.mean(where=False), np.nan)
with pytest.warns(RuntimeWarning):
assert_equal(np.mean(a, where=False), np.nan)
@instantiate_parametrized_tests
| TestMean |
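The where= cases above reduce to a masked sum divided by a masked count; a minimal NumPy sketch of that equivalence (assumes a NumPy build with mean(..., where=...) support):

    import numpy as np

    a = np.arange(16, dtype=float).reshape(4, 4)
    wh = np.array([[False], [True], [True], [False]])   # row mask, broadcast over columns

    library = np.mean(a, axis=0, where=wh)
    mask = np.broadcast_to(wh, a.shape)
    manual = (a * mask).sum(axis=0) / mask.sum(axis=0)  # masked sum / masked count

    assert np.allclose(library, manual)                 # both give [6., 7., 8., 9.]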
python | jmcnamara__XlsxWriter | xlsxwriter/test/utility/test_xl_rowcol_to_cell.py | {
"start": 323,
"end": 2684
} | class ____(unittest.TestCase):
"""
Test xl_rowcol_to_cell() utility function.
"""
def test_xl_rowcol_to_cell(self):
"""Test xl_rowcol_to_cell()"""
tests = [
# row, col, A1 string
(0, 0, "A1"),
(0, 1, "B1"),
(0, 2, "C1"),
(0, 9, "J1"),
(1, 0, "A2"),
(2, 0, "A3"),
(9, 0, "A10"),
(1, 24, "Y2"),
(7, 25, "Z8"),
(9, 26, "AA10"),
(1, 254, "IU2"),
(1, 255, "IV2"),
(1, 256, "IW2"),
(0, 16383, "XFD1"),
(1048576, 16384, "XFE1048577"),
(0, -1, ""),
(-1, 0, ""),
]
for row, col, range in tests:
exp = range
got = xl_rowcol_to_cell(row, col)
# Ignore the warnings for negative values.
warnings.filterwarnings("ignore")
self.assertEqual(exp, got)
def test_xl_rowcol_to_cell_abs(self):
"""Test xl_rowcol_to_cell() with absolute references"""
tests = [
# row, col, row_abs, col_abs, A1 range
(0, 0, True, False, "A$1"),
(0, 0, False, True, "$A1"),
(0, 0, True, True, "$A$1"),
(-1, 0, 0, 0, ""),
(0, -1, 0, 0, ""),
]
for row, col, row_abs, col_abs, range in tests:
exp = range
got = xl_rowcol_to_cell(row, col, row_abs, col_abs)
# Ignore the warnings for negative values.
warnings.filterwarnings("ignore")
self.assertEqual(exp, got)
def test_xl_rowcol_to_cell_fast(self):
"""Test xl_rowcol_to_cell_fast()"""
tests = [
# row, col, A1 range
(0, 0, "A1"),
(0, 1, "B1"),
(0, 2, "C1"),
(0, 9, "J1"),
(1, 0, "A2"),
(2, 0, "A3"),
(9, 0, "A10"),
(1, 24, "Y2"),
(7, 25, "Z8"),
(9, 26, "AA10"),
(1, 254, "IU2"),
(1, 255, "IV2"),
(1, 256, "IW2"),
(0, 16383, "XFD1"),
(1048576, 16384, "XFE1048577"),
]
for row, col, range in tests:
exp = range
got = xl_rowcol_to_cell_fast(row, col)
self.assertEqual(exp, got)
| TestUtility |
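The A1 fixtures above follow Excel's bijective base-26 column numbering; a standalone sketch of that conversion, independent of XlsxWriter's own xl_rowcol_to_cell:

    def col_to_name(col):
        # 0 -> 'A', 25 -> 'Z', 26 -> 'AA', ... (bijective base-26)
        name = ""
        col += 1
        while col:
            col, rem = divmod(col - 1, 26)
            name = chr(ord("A") + rem) + name
        return name

    assert col_to_name(0) == "A"
    assert col_to_name(26) == "AA"
    assert col_to_name(16383) == "XFD"   # last column in an .xlsx sheet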
python | django__django | tests/auth_tests/test_forms.py | {
"start": 15502,
"end": 17318
} | class ____(BaseUserCreationFormTest):
form_class = UserCreationForm
def test_case_insensitive_username(self):
data = {
"username": "TeStClIeNt",
"password1": "test123",
"password2": "test123",
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(
form["username"].errors,
["A user with that username already exists."],
)
@override_settings(AUTH_USER_MODEL="auth_tests.ExtensionUser")
def test_case_insensitive_username_custom_user_and_error_message(self):
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = ExtensionUser
fields = UserCreationForm.Meta.fields + ("date_of_birth",)
error_messages = {
"username": {"unique": "This username has already been taken."}
}
ExtensionUser.objects.create_user(
username="testclient",
password="password",
email="testclient@example.com",
date_of_birth=datetime.date(1984, 3, 5),
)
data = {
"username": "TeStClIeNt",
"password1": "test123",
"password2": "test123",
"date_of_birth": "1980-01-01",
}
form = CustomUserCreationForm(data)
self.assertIs(form.is_valid(), False)
self.assertEqual(
form["username"].errors,
["This username has already been taken."],
)
# To verify that the login form rejects inactive users, use an authentication
# backend that allows them.
@override_settings(
AUTHENTICATION_BACKENDS=["django.contrib.auth.backends.AllowAllUsersModelBackend"]
)
| UserCreationFormTest |
python | numba__numba | numba/tests/test_npdatetime.py | {
"start": 33079,
"end": 33178
} | class ____(TestDatetimeArithmetic):
jitargs = dict(nopython=True)
| TestDatetimeArithmeticNoPython |
python | django__django | tests/test_runner/models.py | {
"start": 649,
"end": 733
} | class ____(models.Model):
people = models.ManyToManyField(Person, through=Through)
| B |
python | huggingface__transformers | tests/trainer/test_trainer.py | {
"start": 26861,
"end": 55832
} | class ____(TestCasePlus, TrainerIntegrationCommon):
"""
Only tests that want to tap into the auto-pre-run 2 trainings:
- self.default_trained_model
- self.alternate_trained_model
directly, or via check_trained_model
"""
def setUp(self):
super().setUp()
args = TrainingArguments("..")
self.n_epochs = args.num_train_epochs
self.batch_size = args.train_batch_size
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(learning_rate=0.1, output_dir=tmp_dir)
trainer.train()
self.default_trained_model = (trainer.model.a, trainer.model.b)
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(learning_rate=0.1, seed=314, output_dir=tmp_dir)
trainer.train()
self.alternate_trained_model = (trainer.model.a, trainer.model.b)
def check_trained_model(self, model, alternate_seed=False, **kwargs):
# Checks a training seeded with learning_rate = 0.1
(a, b) = self.alternate_trained_model if alternate_seed else self.default_trained_model
torch.testing.assert_close(model.a, a, **kwargs)
torch.testing.assert_close(model.b, b, **kwargs)
def test_reproducible_training(self):
# Checks that training worked, model trained and seed made a reproducible training.
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(learning_rate=0.1, output_dir=tmp_dir)
trainer.train()
self.check_trained_model(trainer.model)
# Checks that a different seed gets different (reproducible) results.
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(learning_rate=0.1, seed=314, output_dir=tmp_dir)
trainer.train()
self.check_trained_model(trainer.model, alternate_seed=True)
def test_trainer_with_datasets(self):
np.random.seed(42)
x = np.random.normal(size=(64,)).astype(np.float32)
y = 2.0 * x + 3.0 + np.random.normal(scale=0.1, size=(64,)).astype(np.float32)
train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y})
# Base training. Should have the same results as test_reproducible_training
model = RegressionModel()
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(tmp_dir, learning_rate=0.1, report_to="none")
trainer = Trainer(model, args, train_dataset=train_dataset)
trainer.train()
self.check_trained_model(trainer.model)
# Can return tensors.
train_dataset.set_format(type="torch", dtype=torch.float32)
model = RegressionModel()
trainer = Trainer(model, args, train_dataset=train_dataset)
trainer.train()
self.check_trained_model(trainer.model)
# Adding one column not used by the model should have no impact
z = np.random.normal(size=(64,)).astype(np.float32)
train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y, "extra": z})
model = RegressionModel()
trainer = Trainer(model, args, train_dataset=train_dataset)
trainer.train()
self.check_trained_model(trainer.model)
def test_model_init(self):
train_dataset = RegressionDataset()
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(tmp_dir, learning_rate=0.1, report_to="none")
trainer = Trainer(args=args, train_dataset=train_dataset, model_init=lambda: RegressionModel())
trainer.train()
self.check_trained_model(trainer.model)
# Re-training should restart from scratch, thus lead the same results.
trainer.train()
self.check_trained_model(trainer.model)
# Re-training should restart from scratch, thus lead the same results and new seed should be used.
trainer.args.seed = 314
trainer.train()
self.check_trained_model(trainer.model, alternate_seed=True)
@slow
def test_gradient_accumulation_loss_alignment_with_model_loss(self):
set_seed(42)
model_name = "nickypro/tinyllama-15M"
dataset_name = "wikitext"
dataset_config = "wikitext-2-raw-v1"
dataset = datasets.load_dataset(dataset_name, dataset_config, split="train[:40]")
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token
def tokenize_function(examples):
return tokenizer(examples["text"], max_length=16, padding="max_length", truncation=True)
tokenized_dataset = dataset.map(tokenize_function, batched=True, remove_columns=dataset.column_names)
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
args_kwargs = {
"report_to": "none",
"logging_steps": 1,
"max_steps": 5,
"learning_rate": 3e-4,
"disable_tqdm": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(
tmp_dir,
**args_kwargs,
)
# train with base loss
set_seed(42)
model = AutoModelForCausalLM.from_pretrained(model_name)
base_loss_callback = StoreLossCallback()
trainer = Trainer(
model,
args,
train_dataset=tokenized_dataset,
callbacks=[base_loss_callback],
data_collator=data_collator,
)
assert trainer.model_accepts_loss_kwargs
trainer.train()
args = TrainingArguments(
tmp_dir,
**args_kwargs,
gradient_accumulation_steps=2,
per_device_train_batch_size=4,
)
# train with gradient accumulation
set_seed(42)
model = AutoModelForCausalLM.from_pretrained(model_name)
grad_accum_loss_callback = StoreLossCallback()
trainer = Trainer(
model,
args,
train_dataset=tokenized_dataset,
callbacks=[grad_accum_loss_callback],
data_collator=data_collator,
)
assert trainer.model_accepts_loss_kwargs
trainer.train()
# train with broken loss
set_seed(42)
model = AutoModelForCausalLM.from_pretrained(model_name)
broken_loss_callback = StoreLossCallback()
trainer = Trainer(
model,
args,
train_dataset=tokenized_dataset,
callbacks=[broken_loss_callback],
data_collator=data_collator,
)
# disable model_accepts_loss_kwargs so that "num_items_in_batch" is not passed to the model
trainer.model_accepts_loss_kwargs = False
trainer.train()
# Calculate the difference between the base loss and the grad_accum loss
diff_truth = [
abs(base - grad) for base, grad in zip(base_loss_callback.losses, grad_accum_loss_callback.losses)
]
diff_broken = [abs(base - grad) for base, grad in zip(base_loss_callback.losses, broken_loss_callback.losses)]
# all diff truth should be quite close
self.assertLess(max(diff_truth), 0.01, f"Difference {max(diff_truth)} is not within 0.01")
# max diff broken should be very off ("very off" is arbitrary, but as long as it's bigger than 0.1, it's fine)
self.assertGreater(max(diff_broken), 0.7, f"Difference {max(diff_broken)} is not greater than 0.7")
loss_base = sum(base_loss_callback.losses)
loss_broken = sum(broken_loss_callback.losses)
# mean/sum loss should not vary too much.
relative_diff = abs(loss_base - loss_broken) / max(loss_base, loss_broken)
self.assertLess(relative_diff, 0.2, f"Relative difference {relative_diff} is not within 0.2")
def test_gradient_accumulation_loss_alignment_with_loss_func(self):
set_seed(42)
model_name = "roneneldan/TinyStories-33M"
dataset_name = "wikitext"
dataset_config = "wikitext-2-raw-v1"
dataset = datasets.load_dataset(dataset_name, dataset_config, split="train[:40]")
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token
def tokenize_function(examples):
return tokenizer(examples["text"], max_length=16, padding="max_length", truncation=True)
tokenized_dataset = dataset.map(tokenize_function, batched=True)
tokenizer.pad_token = tokenizer.eos_token
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
model = AutoModelForCausalLM.from_pretrained(model_name)
def compute_loss(logits, labels, vocab_size, num_items_in_batch, disable_num_items_in_batch=False):
return ForCausalLMLoss(
logits["logits"], labels, vocab_size, num_items_in_batch, disable_num_items_in_batch
)
loss_fn = partial(compute_loss, vocab_size=model.config.vocab_size, disable_num_items_in_batch=False)
base_loss_callback = StoreLossCallback()
args_kwargs = {
"report_to": "none",
"logging_steps": 1,
"max_steps": 5,
"learning_rate": 3e-4,
"disable_tqdm": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(
tmp_dir,
**args_kwargs,
)
trainer = Trainer(
model,
args,
train_dataset=tokenized_dataset,
callbacks=[base_loss_callback],
compute_loss_func=loss_fn,
data_collator=data_collator,
)
trainer.train()
grad_accum_loss_callback = StoreLossCallback()
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(
tmp_dir,
**args_kwargs,
gradient_accumulation_steps=2,
per_device_train_batch_size=4,
)
set_seed(42)
model = AutoModelForCausalLM.from_pretrained(model_name)
trainer = Trainer(
model,
args,
train_dataset=tokenized_dataset,
callbacks=[grad_accum_loss_callback],
compute_loss_func=loss_fn,
data_collator=data_collator,
)
trainer.train()
set_seed(42)
model = AutoModelForCausalLM.from_pretrained(model_name)
broken_loss_callback = StoreLossCallback()
loss_fn = partial(compute_loss, vocab_size=model.config.vocab_size, disable_num_items_in_batch=True)
trainer = Trainer(
model,
args,
train_dataset=tokenized_dataset,
callbacks=[broken_loss_callback],
compute_loss_func=loss_fn,
data_collator=data_collator,
)
trainer.train()
# Calculate the difference between the base loss and the grad_accum loss
diff_truth = [
abs(base - grad) for base, grad in zip(base_loss_callback.losses, grad_accum_loss_callback.losses)
]
diff_broken = [
abs(base - grad) for base, grad in zip(base_loss_callback.losses, broken_loss_callback.losses)
]
# all diff truth should be quite close
self.assertLess(max(diff_truth), 0.01, f"Difference {max(diff_truth)} is not within 0.01")
# max diff broken should be very off
self.assertGreater(max(diff_broken), 3, f"Difference {max(diff_broken)} is not greater than 3")
def test_gradient_accumulation(self):
# Training with half the batch size but accumulation steps as 2 should give the same training losses.
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
gradient_accumulation_steps=2, per_device_train_batch_size=4, learning_rate=0.1, output_dir=tmp_dir
)
trainer.train()
self.check_trained_model(trainer.model)
def test_gradient_checkpointing(self):
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(
per_device_train_batch_size=1,
learning_rate=0.1,
gradient_checkpointing=True,
gradient_checkpointing_kwargs={"use_reentrant": False},
output_dir=tmp_dir,
)
previous_params = {k: v.detach().clone() for k, v in trainer.model.named_parameters()}
trainer.train()
# Check if model weights have been updated
for k, v in trainer.model.named_parameters():
self.assertFalse(
torch.allclose(previous_params[k], v, rtol=1e-4, atol=1e-4),
f"Model weights for {k} have not been updated",
)
def test_training_loss(self):
n_gpus = max(1, backend_device_count(torch_device))
# With even logs
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(logging_steps=64 / (8 * n_gpus), output_dir=tmp_dir)
trainer.train()
log_history = trainer.state.log_history
losses = [log["loss"] for log in log_history if "loss" in log]
train_loss = log_history[-1]["train_loss"]
self.assertAlmostEqual(sum(losses) / len(losses), train_loss, places=4)
# With uneven logs
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(logging_steps=5, output_dir=tmp_dir)
trainer.train()
log_history = trainer.state.log_history
# Training loss should be the same as before
new_train_loss = log_history[-1]["train_loss"]
self.assertAlmostEqual(train_loss, new_train_loss, places=4)
def test_custom_optimizer(self):
train_dataset = RegressionDataset()
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(tmp_dir, report_to="none")
model = RegressionModel()
optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda x: 1.0)
trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler))
trainer.train()
(a, b) = self.default_trained_model
self.assertFalse(torch.allclose(trainer.model.a, a))
self.assertFalse(torch.allclose(trainer.model.b, b))
self.assertEqual(trainer.optimizer.state_dict()["param_groups"][0]["lr"], 1.0)
def test_lr_scheduler_kwargs(self):
# test scheduler kwargs passed via TrainingArguments
train_dataset = RegressionDataset()
model = RegressionModel()
num_steps, num_warmup_steps = 10, 2
extra_kwargs = {"power": 5.0, "lr_end": 1e-5} # Non-default arguments
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(
tmp_dir,
lr_scheduler_type="polynomial",
lr_scheduler_kwargs=extra_kwargs,
learning_rate=0.2,
warmup_steps=num_warmup_steps,
report_to="none",
)
trainer = Trainer(model, args, train_dataset=train_dataset)
trainer.create_optimizer_and_scheduler(num_training_steps=num_steps)
# Checking that the scheduler was created
self.assertIsNotNone(trainer.lr_scheduler)
# Checking that the correct args were passed
sched1 = trainer.lr_scheduler
sched2 = get_polynomial_decay_schedule_with_warmup(
trainer.optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_steps, **extra_kwargs
)
self.assertEqual(sched1.lr_lambdas[0].args, sched2.lr_lambdas[0].args)
self.assertEqual(sched1.lr_lambdas[0].keywords, sched2.lr_lambdas[0].keywords)
def test_cosine_with_min_lr_scheduler(self):
train_dataset = RegressionDataset()
model = RegressionModel()
num_steps, num_warmup_steps = 10, 2
extra_kwargs = {"min_lr": 1e-5} # Non-default arguments
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(
tmp_dir,
lr_scheduler_type="cosine_with_min_lr",
lr_scheduler_kwargs=extra_kwargs,
learning_rate=0.2,
warmup_steps=num_warmup_steps,
report_to="none",
)
trainer = Trainer(model, args, train_dataset=train_dataset)
trainer.create_optimizer_and_scheduler(num_training_steps=num_steps)
# Checking that the scheduler was created
self.assertIsNotNone(trainer.lr_scheduler)
# Check the last learning rate
for _ in range(num_steps):
trainer.lr_scheduler.step()
self.assertEqual(trainer.lr_scheduler.get_last_lr()[0], 1e-5)
def test_cosine_with_min_lr_schedule_with_warmup_lr_rate(self):
train_dataset = RegressionDataset()
model = RegressionModel()
num_steps, num_warmup_steps = 10, 2
extra_kwargs = {"min_lr": 1e-5} # Non-default arguments
args = TrainingArguments(
"./regression",
lr_scheduler_type="cosine_warmup_with_min_lr",
lr_scheduler_kwargs=extra_kwargs,
learning_rate=0.2,
warmup_steps=num_warmup_steps,
report_to="none",
)
trainer = Trainer(model, args, train_dataset=train_dataset)
trainer.create_optimizer_and_scheduler(num_training_steps=num_steps)
# Checking that the scheduler was created
self.assertIsNotNone(trainer.lr_scheduler)
# Check the last learning rate
step_lrs = []
for _ in range(num_steps):
step_lrs.append(trainer.optimizer.param_groups[0]["lr"])
trainer.lr_scheduler.step()
self.assertEqual(step_lrs[0], 0.1)
self.assertEqual(step_lrs[1], 0.2)
self.assertEqual(step_lrs[-1], 1e-05)
def test_reduce_lr_on_plateau_args(self):
# test passed arguments for a custom ReduceLROnPlateau scheduler
train_dataset = RegressionDataset(length=64)
eval_dataset = RegressionDataset(length=64)
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(
tmp_dir,
eval_strategy="epoch",
metric_for_best_model="eval_loss",
report_to="none",
)
model = RegressionModel()
optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.2, patience=5, cooldown=2)
trainer = Trainer(
model,
args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
optimizers=(optimizer, lr_scheduler),
)
trainer.train()
self.assertIsInstance(trainer.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau)
self.assertEqual(trainer.lr_scheduler.factor, 0.2)
self.assertEqual(trainer.lr_scheduler.patience, 5)
self.assertEqual(trainer.lr_scheduler.cooldown, 2)
def test_reduce_lr_on_plateau(self):
# test the ReduceLROnPlateau scheduler
class TrainerWithLRLogs(Trainer):
def log(self, logs):
# the LR is computed after metrics and does not exist for the first epoch
if hasattr(self.lr_scheduler, "_last_lr"):
logs["learning_rate"] = self.lr_scheduler._last_lr[0]
super().log(logs)
train_dataset = RegressionDataset(length=64)
eval_dataset = RegressionDataset(length=64)
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(
tmp_dir,
lr_scheduler_type="reduce_lr_on_plateau",
eval_strategy="epoch",
metric_for_best_model="eval_loss",
num_train_epochs=10,
learning_rate=0.2,
report_to="none",
)
model = RegressionModel()
trainer = TrainerWithLRLogs(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
trainer.train()
self.assertIsInstance(trainer.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau)
patience = trainer.lr_scheduler.patience
logs = trainer.state.log_history[1:]
best_loss = logs[0]["eval_loss"]
bad_epochs = 0
for i, log in enumerate(logs[:-1]): # Compare learning rate to next epoch's
loss = log["eval_loss"]
just_decreased = False
if loss > best_loss:
bad_epochs += 1
if bad_epochs > patience:
self.assertLess(logs[i + 1]["learning_rate"], log["learning_rate"])
just_decreased = True
bad_epochs = 0
else:
best_loss = loss
bad_epochs = 0
if not just_decreased:
self.assertEqual(logs[i + 1]["learning_rate"], log["learning_rate"])
def test_adafactor_lr_none(self):
# test the special case where lr=None, since Trainer can't not have lr_scheduler
from transformers.optimization import Adafactor, AdafactorSchedule
train_dataset = RegressionDataset()
with tempfile.TemporaryDirectory() as tmp_dir:
args = TrainingArguments(tmp_dir, report_to="none")
model = RegressionModel()
optimizer = Adafactor(
model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None
)
lr_scheduler = AdafactorSchedule(optimizer)
trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler))
trainer.train()
(a, b) = self.default_trained_model
self.assertFalse(torch.allclose(trainer.model.a, a))
self.assertFalse(torch.allclose(trainer.model.b, b))
self.assertGreater(trainer.optimizer.state_dict()["param_groups"][0]["lr"], 0)
@require_torch_fp16
@require_torch_accelerator
def test_mixed_fp16(self):
# very basic test
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(learning_rate=0.1, fp16=True, logging_steps=1, output_dir=tmp_dir)
trainer.train()
self.check_trained_model(trainer.model, atol=ATOL, rtol=RTOL)
log_0 = trainer.state.log_history[:-1][0]
# check that the grads were properly clipped due to the grad scaler. Otherwise, we get huge values
self.assertEqual(log_0["grad_norm"] < 100, True)
@require_torch_bf16
@require_torch_accelerator
def test_mixed_bf16(self):
# very basic test
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(learning_rate=0.1, bf16=True, output_dir=tmp_dir)
trainer.train()
self.check_trained_model(trainer.model, atol=ATOL, rtol=RTOL)
@require_torch_gpu
@require_torch_tf32
def test_tf32(self):
# very basic test
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = get_regression_trainer(learning_rate=0.1, tf32=True, output_dir=tmp_dir)
trainer.train()
self.check_trained_model(trainer.model)
def test_include_num_input_tokens_seen(self):
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
tokenizer.pad_token = "[PAD]"
model.config.pad_token_id = tokenizer.pad_token_id
sentences = ["This is a short sentence.", "This is a much longer sentence that will require padding."]
labels = torch.tensor([0, 1])
# 1. Test with attention_mask
tokenized_dataset_with_mask = tokenizer(sentences, truncation=True, padding="longest", return_tensors="pt")
tokenized_dataset_with_mask["labels"] = labels
dataset_with_mask = datasets.Dataset.from_dict(tokenized_dataset_with_mask)
# 2. Test without attention_mask
tokenized_dataset_no_mask = {k: v for k, v in tokenized_dataset_with_mask.items() if k != "attention_mask"}
dataset_no_mask = datasets.Dataset.from_dict(tokenized_dataset_no_mask)
# 3. Test with no padding information
tokenizer_no_pad = AutoTokenizer.from_pretrained("bert-base-cased")
tokenizer_no_pad.pad_token = None
data_collator = default_data_collator
with tempfile.TemporaryDirectory() as tmp_dir:
# Test case 1: "non_padding" with attention_mask
args = TrainingArguments(
output_dir=tmp_dir,
include_num_input_tokens_seen="non_padding",
per_device_train_batch_size=2,
max_steps=1,
report_to="none",
)
trainer = Trainer(
model=model,
args=args,
train_dataset=dataset_with_mask,
data_collator=data_collator,
processing_class=tokenizer,
)
trainer.train()
attention_mask = tokenized_dataset_with_mask["attention_mask"]
non_padded_tokens_with_mask = attention_mask.sum().item()
self.assertEqual(trainer.state.num_input_tokens_seen, non_padded_tokens_with_mask)
# Test case 2: "non_padding" without attention_mask (fallback to pad_token_id)
trainer = Trainer(
model=model,
args=args,
train_dataset=dataset_no_mask,
data_collator=data_collator,
processing_class=tokenizer,
)
trainer.train()
input_ids = tokenized_dataset_with_mask["input_ids"] # use original to compute expected
non_padded_tokens_no_mask = (input_ids != tokenizer.pad_token_id).sum().item()
self.assertEqual(trainer.state.num_input_tokens_seen, non_padded_tokens_no_mask)
# Test case 3: "non_padding" with no padding info (fallback to numel)
with self.assertLogs("transformers.trainer", level="WARNING") as cm:
trainer = Trainer(
model=model,
args=args,
train_dataset=dataset_no_mask, # still has input_ids
data_collator=data_collator,
processing_class=tokenizer_no_pad, # tokenizer without pad token
)
trainer.train()
self.assertTrue(
any("Could not determine method to count non-padding tokens" in log for log in cm.output)
)
total_tokens = input_ids.numel()
self.assertEqual(trainer.state.num_input_tokens_seen, total_tokens)
# Test case 4: "all"
args.include_num_input_tokens_seen = "all"
trainer = Trainer(
model=model,
args=args,
train_dataset=dataset_with_mask,
data_collator=data_collator,
processing_class=tokenizer,
)
trainer.train()
self.assertEqual(trainer.state.num_input_tokens_seen, total_tokens)
# Test case 5: True (backward compatibility)
args.include_num_input_tokens_seen = True
trainer = Trainer(
model=model,
args=args,
train_dataset=dataset_with_mask,
data_collator=data_collator,
processing_class=tokenizer,
)
trainer.train()
self.assertEqual(trainer.state.num_input_tokens_seen, total_tokens)
@require_torch
@require_sentencepiece
@require_tokenizers
| TrainerIntegrationPrerunTest |
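What the gradient-accumulation tests above rely on, in isolation: summing per-chunk losses scaled by each chunk's share of the batch reproduces the full-batch mean loss, so the accumulated gradients match the full-batch gradients. A minimal sketch with plain torch, no Trainer involved:

    import torch

    torch.manual_seed(0)
    x = torch.randn(8, 3)
    y = torch.randn(8, 1)

    w_full = torch.zeros(3, 1, requires_grad=True)
    loss = ((x @ w_full - y) ** 2).mean()
    loss.backward()

    w_acc = torch.zeros(3, 1, requires_grad=True)
    for xb, yb in zip(x.split(4), y.split(4)):      # two "micro-batches"
        # scale by the micro-batch share so the summed grads match the full mean
        chunk_loss = ((xb @ w_acc - yb) ** 2).sum() / x.shape[0]
        chunk_loss.backward()

    assert torch.allclose(w_full.grad, w_acc.grad)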
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1506811,
"end": 1507441
} | class ____(sgqlc.types.Type, Node):
"""Represents a 'subscribed' event on a given `Subscribable`."""
__schema__ = github_schema
__field_names__ = ("actor", "created_at", "subscribable")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
"""Identifies the actor who performed the event."""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
subscribable = sgqlc.types.Field(sgqlc.types.non_null(Subscribable), graphql_name="subscribable")
"""Object referenced by event."""
| SubscribedEvent |
python | pytorch__pytorch | torch/utils/data/datapipes/utils/decoder.py | {
"start": 9047,
"end": 10100
} | class ____:
def __init__(self, **loadmat_kwargs) -> None:
try:
import scipy.io as sio
except ImportError as e:
raise ModuleNotFoundError(
"Package `scipy` is required to be installed for mat file."
"Please use `pip install scipy`"
"to install the package"
) from e
self.sio = sio
self.loadmat_kwargs = loadmat_kwargs
def __call__(self, extension, data):
if extension != "mat":
return None
with io.BytesIO(data) as stream:
return self.sio.loadmat(stream, **self.loadmat_kwargs)
def mathandler(**loadmat_kwargs):
return MatHandler(**loadmat_kwargs)
################################################################
# a sample decoder
################################################################
# Extract extension from pathname
def extension_extract_fn(pathname):
ext = os.path.splitext(pathname)[1]
# Remove dot
if ext:
ext = ext[1:]
return ext
| MatHandler |
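A round-trip sketch for the handler above: serialize a dict with scipy.io.savemat, then hand the raw bytes to the callable returned by mathandler (assumes SciPy is installed and that mathandler is in scope from the listing; the variable name "arr" is illustrative):

    import io
    import numpy as np
    import scipy.io as sio

    buf = io.BytesIO()
    sio.savemat(buf, {"arr": np.arange(6).reshape(2, 3)})

    decode = mathandler()                  # handler factory from the listing above
    result = decode("mat", buf.getvalue())
    assert (result["arr"] == np.arange(6).reshape(2, 3)).all()

    assert decode("png", b"...") is None   # non-mat extensions are passed through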
python | huggingface__transformers | src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py | {
"start": 35884,
"end": 38948
} | class ____(XLMRobertaXLPreTrainedModel):
_tied_weights_keys = {
"lm_head.decoder.weight": "roberta.embeddings.word_embeddings.weight",
"lm_head.decoder.bias": "lm_head.bias",
}
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `RobertaForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.roberta = XLMRobertaXLModel(config, add_pooling_layer=False)
self.lm_head = XLMRobertaXLLMHead(config)
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
self.lm_head.bias = new_embeddings.bias
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, MaskedLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
return_dict=True,
**kwargs,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(
custom_intro="""
XLM-RoBERTa-XL Model transformer with a sequence classification/regression head on top (a linear layer on top
of the pooled output) e.g. for GLUE tasks.
"""
)
| XLMRobertaXLForMaskedLM |
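The label handling in the forward above follows the standard masked-LM convention: positions set to -100 are excluded from the loss. A minimal sketch of just that loss computation (shapes and vocab size are illustrative):

    import torch
    from torch.nn import CrossEntropyLoss

    vocab_size = 11
    logits = torch.randn(2, 5, vocab_size)   # (batch, seq_len, vocab)
    labels = torch.randint(0, vocab_size, (2, 5))
    labels[:, :3] = -100                     # only the last two positions count

    loss_fct = CrossEntropyLoss()            # ignore_index defaults to -100
    loss = loss_fct(logits.view(-1, vocab_size), labels.view(-1))
    print(float(loss))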
python | kamyu104__LeetCode-Solutions | Python/longest-absolute-file-path.py | {
"start": 62,
"end": 797
} | class ____(object):
def lengthLongestPath(self, input):
"""
:type input: str
:rtype: int
"""
def split_iter(s, tok):
start = 0
for i in xrange(len(s)):
if s[i] == tok:
yield s[start:i]
start = i + 1
yield s[start:]
max_len = 0
path_len = {0: 0}
for line in split_iter(input, '\n'):
name = line.lstrip('\t')
depth = len(line) - len(name)
if '.' in name:
max_len = max(max_len, path_len[depth] + len(name))
else:
path_len[depth + 1] = path_len[depth] + len(name) + 1
return max_len
| Solution |
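A quick driver for the solution above, using the canonical example from the problem statement (note the listing targets Python 2 via xrange; substitute range under Python 3):

    fs = "dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext"
    print(Solution().lengthLongestPath(fs))   # 20, for "dir/subdir2/file.ext"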
python | airbytehq__airbyte | airbyte-integrations/connectors/source-microsoft-lists/integration_tests/fast/test_airbyte_standards.py | {
"start": 384,
"end": 734
} | class ____(standard_tests.DeclarativeSourceTestSuite):
"""Test suite for the Airbyte standard tests.
This class inherits from SourceTestSuiteBase and implements all of the tests in the suite.
As long as the class name starts with "Test", pytest will automatically discover and run the
tests in this class.
"""
| TestAirbyteStandardTests |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/test_product_dimension_performance_report.py | {
"start": 220,
"end": 1859
} | class ____(TestSuiteReportStream):
state_file_after_migration = "non_hourly_reports_state_after_migration"
state_file_after_migration_with_cursor_further_config_start_date = (
"non_hourly_reports_state_after_migration_with_cursor_further_config_start_date"
)
first_read_state = get_state_after_migration(time_period="2024-05-17", account_id=TestSuiteReportStream.account_id)
second_read_state = get_state_after_migration(time_period="2023-12-25", account_id=TestSuiteReportStream.account_id)
first_read_state_for_records_further_start_date = get_state_after_migration(
time_period="2024-05-06", account_id=TestSuiteReportStream.account_id
)
second_read_state_for_records_further_start_date = get_state_after_migration(
time_period="2024-05-07", account_id=TestSuiteReportStream.account_id
)
second_read_state_for_records_before_start_date = get_state_after_migration(
time_period="2024-01-01", account_id=TestSuiteReportStream.account_id
)
def mock_report_apis(self):
self.mock_user_query_api(response_template="user_query")
self.mock_accounts_search_api(
response_template="accounts_search_for_report",
body=b'{"PageInfo": {"Index": 0, "Size": 1000}, "Predicates": [{"Field": "UserId", "Operator": "Equals", "Value": "123456789"}], "ReturnAdditionalFields": "TaxCertificate,AccountMode"}',
)
self.mock_generate_report_api(
endpoint="Poll", response_template="generate_report_poll", body=b'{"ReportRequestId": "thisisthereport_requestid"}'
)
| TestBaseProductDimensionPerformanceReport |
python | getsentry__sentry-python | sentry_sdk/integrations/django/transactions.py | {
"start": 1045,
"end": 4951
} | class ____:
_new_style_group_matcher = re.compile(
r"<(?:([^>:]+):)?([^>]+)>"
) # https://github.com/django/django/blob/21382e2743d06efbf5623e7c9b6dccf2a325669b/django/urls/resolvers.py#L245-L247
_optional_group_matcher = re.compile(r"\(\?\:([^\)]+)\)")
_named_group_matcher = re.compile(r"\(\?P<(\w+)>[^\)]+\)+")
_non_named_group_matcher = re.compile(r"\([^\)]+\)")
# [foo|bar|baz]
_either_option_matcher = re.compile(r"\[([^\]]+)\|([^\]]+)\]")
_camel_re = re.compile(r"([A-Z]+)([a-z])")
_cache = {} # type: Dict[URLPattern, str]
def _simplify(self, pattern):
# type: (Union[URLPattern, URLResolver]) -> str
r"""
Clean up urlpattern regexes into something readable by humans:
From:
> "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
To:
> "{sport_slug}/athletes/{athlete_slug}/"
"""
# "new-style" path patterns can be parsed directly without turning them
# into regexes first
if (
RoutePattern is not None
and hasattr(pattern, "pattern")
and isinstance(pattern.pattern, RoutePattern)
):
return self._new_style_group_matcher.sub(
lambda m: "{%s}" % m.group(2), str(pattern.pattern._route)
)
result = get_regex(pattern).pattern
# remove optional params
# TODO(dcramer): it'd be nice to change these into [%s] but it currently
# conflicts with the other rules because we're doing regexp matches
# rather than parsing tokens
result = self._optional_group_matcher.sub(lambda m: "%s" % m.group(1), result)
# handle named groups first
result = self._named_group_matcher.sub(lambda m: "{%s}" % m.group(1), result)
# handle non-named groups
result = self._non_named_group_matcher.sub("{var}", result)
# handle optional params
result = self._either_option_matcher.sub(lambda m: m.group(1), result)
# clean up any outstanding regex-y characters.
result = (
result.replace("^", "")
.replace("$", "")
.replace("?", "")
.replace("\\A", "")
.replace("\\Z", "")
.replace("//", "/")
.replace("\\", "")
)
return result
def _resolve(self, resolver, path, parents=None):
# type: (URLResolver, str, Optional[List[URLResolver]]) -> Optional[str]
match = get_regex(resolver).search(path) # Django < 2.0
if not match:
return None
if parents is None:
parents = [resolver]
elif resolver not in parents:
parents = parents + [resolver]
new_path = path[match.end() :]
for pattern in resolver.url_patterns:
# this is an include()
if not pattern.callback:
match_ = self._resolve(pattern, new_path, parents)
if match_:
return match_
continue
elif not get_regex(pattern).search(new_path):
continue
try:
return self._cache[pattern]
except KeyError:
pass
prefix = "".join(self._simplify(p) for p in parents)
result = prefix + self._simplify(pattern)
if not result.startswith("/"):
result = "/" + result
self._cache[pattern] = result
return result
return None
def resolve(
self,
path, # type: str
urlconf=None, # type: Union[None, Tuple[URLPattern, URLPattern, URLResolver], Tuple[URLPattern]]
):
# type: (...) -> Optional[str]
resolver = get_resolver(urlconf)
match = self._resolve(resolver, path)
return match
LEGACY_RESOLVER = RavenResolver()
| RavenResolver |
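The `_simplify` docstring above is the whole trick: named regex groups become `{name}` placeholders. A stand-alone sketch of that substitution, reusing the same pattern without any Django machinery:

```python
import re

named_group_matcher = re.compile(r"\(\?P<(\w+)>[^\)]+\)+")  # same pattern as above

url_regex = r"^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
readable = named_group_matcher.sub(lambda m: "{%s}" % m.group(1), url_regex)
readable = readable.replace("^", "").replace("$", "")
print(readable)  # {sport_slug}/athletes/{athlete_slug}/
```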
python | plotly__plotly.py | plotly/graph_objs/layout/_font.py | {
"start": 235,
"end": 9909
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout"
_path_str = "layout.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets the global font. Note that fonts used in traces and other
layout components inherit from the global font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
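A short usage note: in plotly this object is normally reached through `layout.font`, and the global font cascades to traces and other layout components. A small hedged sketch of the usual dict-based route:

```python
import plotly.graph_objects as go

fig = go.Figure(data=[go.Scatter(y=[1, 3, 2])])
# Equivalent to constructing layout.Font directly; the dict form is coerced for you.
fig.update_layout(font=dict(family="Courier New, monospace", size=14, color="crimson"))
```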
python | huggingface__transformers | tests/pipelines/test_pipelines_image_classification.py | {
"start": 1388,
"end": 10995
} | class ____(unittest.TestCase):
model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_dataset = None
@classmethod
def _load_dataset(cls):
# Lazy loading of the dataset. Because it is a class method, it will only be loaded once per pytest process.
if cls._dataset is None:
# we use revision="refs/pr/1" until the PR is merged
# https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
cls._dataset = datasets.load_dataset(
"hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1"
)
def get_test_pipeline(
self,
model,
tokenizer=None,
image_processor=None,
feature_extractor=None,
processor=None,
dtype="float32",
):
image_classifier = ImageClassificationPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
image_processor=image_processor,
processor=processor,
dtype=dtype,
top_k=2,
)
examples = [
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
"http://images.cocodataset.org/val2017/000000039769.jpg",
]
return image_classifier, examples
def run_pipeline_test(self, image_classifier, examples):
self._load_dataset()
outputs = image_classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")
self.assertEqual(
outputs,
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
)
# Accepts URL + PIL.Image + lists
outputs = image_classifier(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
self._dataset[0]["image"],
# LA
self._dataset[1]["image"],
# L
self._dataset[2]["image"],
]
)
self.assertEqual(
outputs,
[
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
],
)
for single_output in outputs:
for output_element in single_output:
compare_pipeline_output_to_hub_spec(output_element, ImageClassificationOutputElement)
@require_torch
def test_small_model_pt(self):
small_model = "hf-internal-testing/tiny-random-vit"
image_classifier = pipeline("image-classification", model=small_model)
outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
nested_simplify(outputs, decimals=4),
[{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}],
)
outputs = image_classifier(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
],
top_k=2,
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}],
[{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}],
],
)
def test_custom_tokenizer(self):
tokenizer = PreTrainedTokenizerBase()
# Assert that the pipeline can be initialized with a feature extractor that is not in any mapping
image_classifier = pipeline(
"image-classification", model="hf-internal-testing/tiny-random-vit", tokenizer=tokenizer
)
self.assertIs(image_classifier.tokenizer, tokenizer)
@require_torch
def test_torch_float16_pipeline(self):
image_classifier = pipeline(
"image-classification", model="hf-internal-testing/tiny-random-vit", dtype=torch.float16
)
outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
nested_simplify(outputs, decimals=3),
[{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}],
)
@require_torch
def test_torch_bfloat16_pipeline(self):
image_classifier = pipeline(
"image-classification", model="hf-internal-testing/tiny-random-vit", dtype=torch.bfloat16
)
outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
nested_simplify(outputs, decimals=3),
[{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}],
)
@slow
@require_torch
def test_perceiver(self):
# Perceiver is not tested by `run_pipeline_test` properly.
# That is because the type of feature_extractor and model preprocessor need to be kept
# in sync, which is not the case in the current design
image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-conv")
outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.4385, "label": "tabby, tabby cat"},
{"score": 0.321, "label": "tiger cat"},
{"score": 0.0502, "label": "Egyptian cat"},
{"score": 0.0137, "label": "crib, cot"},
{"score": 0.007, "label": "radiator"},
],
)
image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-fourier")
outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.5658, "label": "tabby, tabby cat"},
{"score": 0.1309, "label": "tiger cat"},
{"score": 0.0722, "label": "Egyptian cat"},
{"score": 0.0707, "label": "remote control, remote"},
{"score": 0.0082, "label": "computer keyboard, keypad"},
],
)
image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-learned")
outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
{"score": 0.3022, "label": "tabby, tabby cat"},
{"score": 0.2362, "label": "Egyptian cat"},
{"score": 0.1856, "label": "tiger cat"},
{"score": 0.0324, "label": "remote control, remote"},
{"score": 0.0096, "label": "quilt, comforter, comfort, puff"},
],
)
@slow
@require_torch
def test_multilabel_classification(self):
small_model = "hf-internal-testing/tiny-random-vit"
# Sigmoid is applied for multi-label classification
image_classifier = pipeline("image-classification", model=small_model)
image_classifier.model.config.problem_type = "multi_label_classification"
outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
nested_simplify(outputs, decimals=4),
[{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}],
)
outputs = image_classifier(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
]
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[
[{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}],
[{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}],
],
)
@slow
@require_torch
def test_function_to_apply(self):
small_model = "hf-internal-testing/tiny-random-vit"
# Sigmoid is applied for multi-label classification
image_classifier = pipeline("image-classification", model=small_model)
outputs = image_classifier(
"http://images.cocodataset.org/val2017/000000039769.jpg",
function_to_apply="sigmoid",
)
self.assertEqual(
nested_simplify(outputs, decimals=4),
[{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}],
)
| ImageClassificationPipelineTests |
python | pennersr__django-allauth | allauth/socialaccount/providers/openid/admin.py | {
"start": 134,
"end": 289
} | class ____(admin.ModelAdmin):
pass
admin.site.register(OpenIDStore, OpenIDStoreAdmin)
admin.site.register(OpenIDNonce, OpenIDNonceAdmin)
| OpenIDNonceAdmin |
python | realpython__materials | python-del-statement/factorial.py | {
"start": 0,
"end": 774
} | class ____:
def __init__(self, number):
self._number = number
self._cache = {0: 1, 1: 1}
self._factorial = self._calculate_factorial(number)
del self._cache
def _calculate_factorial(self, number):
if number in self._cache:
return self._cache[number]
current_factorial = number * self._calculate_factorial(number - 1)
self._cache[number] = current_factorial
return current_factorial
@property
def number(self):
return self._number
@property
def factorial(self):
return self._factorial
def __str__(self) -> str:
return f"{self._number}! = {self._factorial}"
def __repr__(self):
return f"{type(self).__name__}({self._number})"
| Factorial |
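Usage sketch for the `Factorial` class above; the point of `del self._cache` in `__init__` is that the memo dict only lives for the duration of construction:

```python
f = Factorial(5)
print(f)                     # 5! = 120
print(repr(f))               # Factorial(5)
print(hasattr(f, "_cache"))  # False -- the memo was deleted after construction
```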
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 21644,
"end": 21916
} | class ____(BaseModel):
"""
Scheduler info serializer for responses.
"""
status: Annotated[str | None, Field(title="Status")] = None
latest_scheduler_heartbeat: Annotated[str | None, Field(title="Latest Scheduler Heartbeat")] = None
| SchedulerInfoResponse |
python | sqlalchemy__sqlalchemy | test/orm/dml/test_orm_upd_del_assorted.py | {
"start": 13023,
"end": 14090
} | class ____(fixtures.DeclarativeMappedTest):
__sparse_driver_backend__ = True
__only_on__ = ("postgresql",)
@classmethod
def setup_classes(cls):
from sqlalchemy.dialects.postgresql import JSONB
Base = cls.DeclarativeBasic
class TestTbl(Base):
__tablename__ = "testtbl"
test_id = Column(Integer, primary_key=True)
test_field = Column(JSONB)
def test_issue_11849(self):
TestTbl = self.classes.TestTbl
session = fixture_session()
obj = TestTbl(
test_id=1, test_field={"test1": 1, "test2": "2", "test3": [3, "3"]}
)
session.add(obj)
query = (
update(TestTbl)
.where(TestTbl.test_id == 1)
.values(test_field=TestTbl.test_field + {"test3": {"test4": 4}})
)
session.execute(query)
# not loaded
assert "test_field" not in obj.__dict__
# synchronizes on load
eq_(obj.test_field, {"test1": 1, "test2": "2", "test3": {"test4": 4}})
| PGIssue11849Test |
python | pikepdf__pikepdf | tests/test_parsers.py | {
"start": 9541,
"end": 10720
} | class ____:
def test_indirect_object(self):
p = pikepdf.new()
arr = p.make_indirect(Array([42]))
d = p.make_indirect(Dictionary(Foo=Name.Bar))
stream = p.make_stream(b'test stream')
with pytest.raises(TypeError):
ContentStreamInstruction([arr], Operator('Do'))
with pytest.raises(TypeError):
ContentStreamInstruction([d], Operator('Do'))
with pytest.raises(TypeError):
ContentStreamInstruction([Name.Him, stream], Operator('Do'))
def test_string_parse_unparse_pdfdoc():
pdf = pikepdf.new()
parsed = pikepdf.parse_content_stream(pikepdf.Stream(pdf, b'(Hello, world!) Tj'))
assert pikepdf.unparse_content_stream(parsed) == b'(Hello, world!) Tj'
def test_string_parse_unparse_not_pdfdoc_safe():
pdf = pikepdf.new()
hello_world_chinese = '你好世界'
hello_world_chinese_pdf_encoded = hello_world_chinese.encode('utf-16be').hex()
parsed = pikepdf.parse_content_stream(
pikepdf.Stream(pdf, f'<{hello_world_chinese_pdf_encoded}> Tj'.encode('ascii'))
)
assert pikepdf.unparse_content_stream(parsed) == b'<4f60597d4e16754c> Tj'
| TestBadSingleInstructions |
python | PrefectHQ__prefect | src/integrations/prefect-aws/prefect_aws/workers/ecs_worker.py | {
"start": 7971,
"end": 8146
} | class ____(BaseModel):
"""
The capacity provider strategy to use when running the task.
"""
capacityProvider: str
weight: int
base: int
| CapacityProvider |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/coercions.py | {
"start": 14504,
"end": 14850
} | class ____(RoleImpl):
__slots__ = ()
def _implicit_coercions(self, element, resolved, argname=None, **kw):
if isinstance(element, str):
return element
else:
self._raise_for_expected(element, argname, resolved)
def _literal_coercion(self, element, **kw):
return element
| _ReturnsStringKey |
python | paramiko__paramiko | paramiko/ssh_exception.py | {
"start": 974,
"end": 1252
} | class ____(SSHException):
"""
Exception raised when authentication failed for some reason. It may be
possible to retry with different credentials. (Other classes specify more
specific reasons.)
.. versionadded:: 1.6
"""
pass
| AuthenticationException |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_gtk3.py | {
"start": 22540,
"end": 22647
} | class ____(_BackendGTK):
FigureCanvas = FigureCanvasGTK3
FigureManager = FigureManagerGTK3
| _BackendGTK3 |
python | spack__spack | lib/spack/spack/stage.py | {
"start": 48433,
"end": 48513
} | class ____(StageError):
"""Error encountered during restaging."""
| RestageError |
python | python-excel__xlwt | xlwt/BIFFRecords.py | {
"start": 78178,
"end": 78369
} | class ____(BiffRecord):
"""
    Semantics are identical to the HEADER record.
"""
_REC_ID = 0x0015
def __init__(self, footer_str):
self._rec_data = upack2(footer_str)
| FooterRecord |
python | getsentry__sentry | src/sentry/sentry_metrics/querying/visitors/query_condition.py | {
"start": 3230,
"end": 3902
} | class ____(QueryConditionVisitor[QueryCondition]):
"""
Visitor that recursively transforms all conditions whose `key` matches one of the supplied mappings. If found,
replaces it with the mapped value.
"""
def __init__(self, mappings: Mapping[str, str]):
self._mappings = mappings
def _visit_condition(self, condition: Condition) -> QueryCondition:
if not isinstance(condition.lhs, Column):
return condition
return Condition(
lhs=Column(name=self._mappings.get(condition.lhs.key, condition.lhs.name)),
op=condition.op,
rhs=condition.rhs,
)
| MappingTransformationVisitor |
python | pytorch__pytorch | torch/_dynamo/variables/user_defined.py | {
"start": 80306,
"end": 80628
} | class ____(UserDefinedObjectVariable):
# Dummy class to check if the object is an IntWrapper, and turn it into a
# symint
@staticmethod
def is_matching_object(obj):
mod = sys.modules.get("torch.export.dynamic_shapes")
return mod is not None and type(obj) is mod._IntWrapper
| IntWrapperVariable |
python | getsentry__sentry | src/sentry/utils/types.py | {
"start": 2537,
"end": 2855
} | class ____(Type[float]):
"""Coerce a float from a string or integer"""
name = "float"
default = 0.0
expected_types = (float,)
compatible_types = (str, int, float)
def convert(self, value):
try:
return float(value)
except ValueError:
return None
| FloatType |
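A stand-alone sketch of the same coercion pattern, independent of sentry's `Type` base class (the helper name here is made up for illustration):

```python
def coerce_float(value):
    # Mirror FloatType.convert: coerce to float, or None on bad input.
    try:
        return float(value)
    except ValueError:
        return None

print(coerce_float("3.5"))   # 3.5
print(coerce_float(2))       # 2.0
print(coerce_float("oops"))  # None
```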
python | apache__airflow | providers/apache/beam/tests/unit/apache/beam/hooks/test_beam.py | {
"start": 2977,
"end": 14553
} | class ____:
@mock.patch(BEAM_STRING.format("run_beam_command"))
@mock.patch("airflow.providers.apache.beam.hooks.beam.subprocess.check_output", return_value=b"2.39.0")
def test_start_python_pipeline(self, mock_check_output, mock_runner):
hook = BeamHook(runner=DEFAULT_RUNNER)
process_line_callback = MagicMock()
is_dataflow_job_id_exist_callback = MagicMock()
hook.start_python_pipeline(
variables=copy.deepcopy(BEAM_VARIABLES_PY),
py_file=PY_FILE,
py_options=PY_OPTIONS,
process_line_callback=process_line_callback,
is_dataflow_job_id_exist_callback=is_dataflow_job_id_exist_callback,
)
expected_cmd = [
"python3",
"-m",
PY_FILE,
f"--runner={DEFAULT_RUNNER}",
"--output=gs://test/output",
"--labels=foo=bar",
]
mock_runner.assert_called_once_with(
cmd=expected_cmd,
process_line_callback=process_line_callback,
working_directory=None,
log=ANY,
is_dataflow_job_id_exist_callback=is_dataflow_job_id_exist_callback,
)
@mock.patch("airflow.providers.apache.beam.hooks.beam.subprocess.check_output", return_value=b"2.35.0")
def test_start_python_pipeline_unsupported_option(self, mock_check_output):
hook = BeamHook(runner=DEFAULT_RUNNER)
with pytest.raises(
AirflowException,
match=re.escape("The impersonateServiceAccount option requires Apache Beam 2.39.0 or newer."),
):
hook.start_python_pipeline(
variables={
"impersonate_service_account": "test@impersonation.com",
},
py_file="/tmp/file.py",
py_options=["-m"],
py_interpreter="python3",
py_requirements=None,
py_system_site_packages=False,
process_line_callback=MagicMock(),
is_dataflow_job_id_exist_callback=MagicMock(),
)
@pytest.mark.parametrize(
"py_interpreter",
[
pytest.param("python", id="default python"),
pytest.param("python2", id="major python version 2.x"),
pytest.param("python3", id="major python version 3.x"),
pytest.param("python3.6", id="major.minor python version"),
],
)
@mock.patch(BEAM_STRING.format("run_beam_command"))
@mock.patch("airflow.providers.apache.beam.hooks.beam.subprocess.check_output", return_value=b"2.39.0")
def test_start_python_pipeline_with_custom_interpreter(
self, mock_check_output, mock_runner, py_interpreter
):
hook = BeamHook(runner=DEFAULT_RUNNER)
process_line_callback = MagicMock()
is_dataflow_job_id_exist_callback = MagicMock()
hook.start_python_pipeline(
variables=copy.deepcopy(BEAM_VARIABLES_PY),
py_file=PY_FILE,
py_options=PY_OPTIONS,
py_interpreter=py_interpreter,
process_line_callback=process_line_callback,
is_dataflow_job_id_exist_callback=is_dataflow_job_id_exist_callback,
)
expected_cmd = [
py_interpreter,
"-m",
PY_FILE,
f"--runner={DEFAULT_RUNNER}",
"--output=gs://test/output",
"--labels=foo=bar",
]
mock_runner.assert_called_once_with(
cmd=expected_cmd,
process_line_callback=process_line_callback,
working_directory=None,
log=ANY,
is_dataflow_job_id_exist_callback=is_dataflow_job_id_exist_callback,
)
@pytest.mark.parametrize(
("current_py_requirements", "current_py_system_site_packages"),
[
pytest.param("foo-bar", False, id="requirements without system site-packages"),
pytest.param("foo-bar", True, id="requirements with system site-packages"),
pytest.param([], True, id="only system site-packages"),
],
)
@mock.patch(BEAM_STRING.format("prepare_virtualenv"))
@mock.patch(BEAM_STRING.format("run_beam_command"))
@mock.patch("airflow.providers.apache.beam.hooks.beam.subprocess.check_output", return_value=b"2.39.0")
def test_start_python_pipeline_with_non_empty_py_requirements_and_without_system_packages(
self,
mock_check_output,
mock_runner,
mock_virtualenv,
current_py_requirements,
current_py_system_site_packages,
):
hook = BeamHook(runner=DEFAULT_RUNNER)
mock_virtualenv.return_value = "/dummy_dir/bin/python"
process_line_callback = MagicMock()
is_dataflow_job_id_exist_callback = MagicMock()
hook.start_python_pipeline(
variables=copy.deepcopy(BEAM_VARIABLES_PY),
py_file=PY_FILE,
py_options=PY_OPTIONS,
py_requirements=current_py_requirements,
py_system_site_packages=current_py_system_site_packages,
process_line_callback=process_line_callback,
is_dataflow_job_id_exist_callback=is_dataflow_job_id_exist_callback,
)
expected_cmd = [
"/dummy_dir/bin/python",
"-m",
PY_FILE,
f"--runner={DEFAULT_RUNNER}",
"--output=gs://test/output",
"--labels=foo=bar",
]
mock_runner.assert_called_once_with(
cmd=expected_cmd,
process_line_callback=process_line_callback,
is_dataflow_job_id_exist_callback=is_dataflow_job_id_exist_callback,
working_directory=None,
log=ANY,
)
mock_virtualenv.assert_called_once_with(
venv_directory=mock.ANY,
python_bin="python3",
system_site_packages=current_py_system_site_packages,
requirements=current_py_requirements,
)
@mock.patch(BEAM_STRING.format("run_beam_command"))
@mock.patch("airflow.providers.apache.beam.hooks.beam.subprocess.check_output", return_value=b"2.39.0")
def test_start_python_pipeline_with_empty_py_requirements_and_without_system_packages(
self, mock_check_output, mock_runner
):
hook = BeamHook(runner=DEFAULT_RUNNER)
wait_for_done = mock_runner.return_value.wait_for_done
process_line_callback = MagicMock()
is_dataflow_job_id_exist_callback = MagicMock()
with pytest.raises(AirflowException, match=r"Invalid method invocation\."):
hook.start_python_pipeline(
variables=copy.deepcopy(BEAM_VARIABLES_PY),
py_file=PY_FILE,
py_options=PY_OPTIONS,
py_requirements=[],
process_line_callback=process_line_callback,
is_dataflow_job_id_exist_callback=is_dataflow_job_id_exist_callback,
)
mock_runner.assert_not_called()
wait_for_done.assert_not_called()
@mock.patch(BEAM_STRING.format("run_beam_command"))
def test_start_java_pipeline(self, mock_runner):
hook = BeamHook(runner=DEFAULT_RUNNER)
process_line_callback = MagicMock()
hook.start_java_pipeline(
jar=JAR_FILE,
variables=copy.deepcopy(BEAM_VARIABLES_JAVA),
process_line_callback=process_line_callback,
)
expected_cmd = [
"java",
"-jar",
JAR_FILE,
f"--runner={DEFAULT_RUNNER}",
"--output=gs://test/output",
'--labels={"foo":"bar"}',
]
mock_runner.assert_called_once_with(
cmd=expected_cmd,
process_line_callback=process_line_callback,
working_directory=None,
log=ANY,
is_dataflow_job_id_exist_callback=None,
)
@mock.patch(BEAM_STRING.format("run_beam_command"))
def test_start_java_pipeline_with_job_class(self, mock_runner):
hook = BeamHook(runner=DEFAULT_RUNNER)
process_line_callback = MagicMock()
hook.start_java_pipeline(
jar=JAR_FILE,
variables=copy.deepcopy(BEAM_VARIABLES_JAVA),
job_class=JOB_CLASS,
process_line_callback=process_line_callback,
)
expected_cmd = [
"java",
"-cp",
JAR_FILE,
JOB_CLASS,
f"--runner={DEFAULT_RUNNER}",
"--output=gs://test/output",
'--labels={"foo":"bar"}',
]
mock_runner.assert_called_once_with(
cmd=expected_cmd,
process_line_callback=process_line_callback,
working_directory=None,
log=ANY,
is_dataflow_job_id_exist_callback=None,
)
@mock.patch(BEAM_STRING.format("shutil.which"))
@mock.patch(BEAM_STRING.format("run_beam_command"))
def test_start_go_pipeline(self, mock_runner, mock_which):
mock_which.return_value = "/some_path/to/go"
hook = BeamHook(runner=DEFAULT_RUNNER)
process_line_callback = MagicMock()
hook.start_go_pipeline(
go_file=GO_FILE,
variables=copy.deepcopy(BEAM_VARIABLES_GO),
process_line_callback=process_line_callback,
)
basename = os.path.basename(GO_FILE)
go_workspace = os.path.dirname(GO_FILE)
expected_cmd = [
"go",
"run",
basename,
f"--runner={DEFAULT_RUNNER}",
"--output=gs://test/output",
'--labels={"foo":"bar"}',
]
mock_runner.assert_called_once_with(
cmd=expected_cmd,
process_line_callback=process_line_callback,
working_directory=go_workspace,
log=ANY,
is_dataflow_job_id_exist_callback=None,
)
@mock.patch(BEAM_STRING.format("shutil.which"))
def test_start_go_pipeline_without_go_installed_raises(self, mock_which):
mock_which.return_value = None
hook = BeamHook(runner=DEFAULT_RUNNER)
error_message = (
r"You need to have Go installed to run beam go pipeline\. See .* "
"installation guide. If you are running airflow in Docker see more info at '.*'"
)
with pytest.raises(ConfigException, match=error_message):
hook.start_go_pipeline(
go_file=GO_FILE,
variables=copy.deepcopy(BEAM_VARIABLES_GO),
)
@mock.patch(BEAM_STRING.format("run_beam_command"))
def test_start_go_pipeline_with_binary(self, mock_runner):
hook = BeamHook(runner=DEFAULT_RUNNER)
process_line_callback = MagicMock()
launcher_binary = "/path/to/launcher-main"
worker_binary = "/path/to/worker-main"
hook.start_go_pipeline_with_binary(
variables=BEAM_VARIABLES_GO,
launcher_binary=launcher_binary,
worker_binary=worker_binary,
process_line_callback=process_line_callback,
)
expected_cmd = [
launcher_binary,
f"--runner={DEFAULT_RUNNER}",
"--output=gs://test/output",
'--labels={"foo":"bar"}',
f"--worker_binary={worker_binary}",
]
mock_runner.assert_called_once_with(
cmd=expected_cmd,
process_line_callback=process_line_callback,
working_directory=None,
log=ANY,
is_dataflow_job_id_exist_callback=None,
)
| TestBeamHook |
python | doocs__leetcode | solution/0700-0799/0700.Search in a Binary Search Tree/Solution.py | {
"start": 192,
"end": 508
} | class ____:
def searchBST(self, root: Optional[TreeNode], val: int) -> Optional[TreeNode]:
if root is None or root.val == val:
return root
return (
self.searchBST(root.left, val)
if root.val > val
else self.searchBST(root.right, val)
)
| Solution |
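An equivalent iterative sketch of the same BST search, assuming the usual `TreeNode(val, left, right)` definition; it walks one side per comparison instead of recursing:

```python
def search_bst_iterative(root, val):
    # Descend: smaller targets go left, larger go right, until found or None.
    while root and root.val != val:
        root = root.left if root.val > val else root.right
    return root
```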
python | openai__openai-python | src/openai/types/responses/response_output_text_param.py | {
"start": 833,
"end": 1344
} | class ____(TypedDict, total=False):
end_index: Required[int]
"""The index of the last character of the URL citation in the message."""
start_index: Required[int]
"""The index of the first character of the URL citation in the message."""
title: Required[str]
"""The title of the web resource."""
type: Required[Literal["url_citation"]]
"""The type of the URL citation. Always `url_citation`."""
url: Required[str]
"""The URL of the web resource."""
| AnnotationURLCitation |
python | gevent__gevent | src/gevent/events.py | {
"start": 6901,
"end": 7234
} | class ____(object):
"""
The event emitted when the event loop is blocked.
Implements `IEventLoopBlocked`.
"""
def __init__(self, greenlet, blocking_time, info, *, hub=None):
self.greenlet = greenlet
self.blocking_time = blocking_time
self.info = info
self.hub = hub
| EventLoopBlocked |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/sources.py | {
"start": 1844,
"end": 2139
} | class ____(str, Enum):
ADD = "ADD"
DELETE = "DELETE" # Deprecated as we don't care about backend-specific deletion
UPDATE = "UPDATE"
ADD_OR_UPDATE = "ADD_OR_UPDATE"
CrudMethodInfoFn: TypeAlias = Callable[..., Tuple[CrudMethodType, Type["Datasource"]]]
@public_api
| CrudMethodType |
python | aio-libs__aiohttp | aiohttp/abc.py | {
"start": 3972,
"end": 5084
} | class ____(Sized, Iterable[Morsel[str]]):
"""Abstract Cookie Jar."""
@property
@abstractmethod
def quote_cookie(self) -> bool:
"""Return True if cookies should be quoted."""
@abstractmethod
def clear(self, predicate: ClearCookiePredicate | None = None) -> None:
"""Clear all cookies if no predicate is passed."""
@abstractmethod
def clear_domain(self, domain: str) -> None:
"""Clear all cookies for domain and all subdomains."""
@abstractmethod
def update_cookies(self, cookies: LooseCookies, response_url: URL = URL()) -> None:
"""Update cookies."""
def update_cookies_from_headers(
self, headers: Sequence[str], response_url: URL
) -> None:
"""Update cookies from raw Set-Cookie headers."""
if headers and (cookies_to_update := parse_set_cookie_headers(headers)):
self.update_cookies(cookies_to_update, response_url)
@abstractmethod
def filter_cookies(self, request_url: URL) -> BaseCookie[str]:
"""Return the jar's cookies filtered by their attributes."""
| AbstractCookieJar |
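A minimal sketch of a concrete jar satisfying the ABC above (aiohttp ships its own implementations; this toy version keeps one flat `BaseCookie`, does no domain/path filtering, and deliberately skips the base `__init__`):

```python
from http.cookies import BaseCookie

class FlatCookieJar(AbstractCookieJar):
    """Toy jar: a single flat cookie store, no attribute filtering."""

    def __init__(self):
        self._cookies = BaseCookie()

    @property
    def quote_cookie(self):
        return True

    def clear(self, predicate=None):
        self._cookies = BaseCookie()

    def clear_domain(self, domain):
        self.clear()  # toy behavior: no per-domain bookkeeping

    def update_cookies(self, cookies, response_url=None):
        self._cookies.update(cookies)

    def filter_cookies(self, request_url):
        return self._cookies  # no filtering by domain, path, or expiry

    def __len__(self):
        return len(self._cookies)

    def __iter__(self):
        return iter(self._cookies.values())
```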
python | xlwings__xlwings | xlwings/constants.py | {
"start": 58109,
"end": 58395
} | class ____:
xlErrorBarIncludeBoth = 1 # from enum XlErrorBarInclude
xlErrorBarIncludeMinusValues = 3 # from enum XlErrorBarInclude
xlErrorBarIncludeNone = -4142 # from enum XlErrorBarInclude
xlErrorBarIncludePlusValues = 2 # from enum XlErrorBarInclude
| ErrorBarInclude |
python | davidhalter__jedi | jedi/file_io.py | {
"start": 1779,
"end": 2195
} | class ____(file_io.KnownContentFileIO, FileIOFolderMixin):
"""For .zip and .egg archives"""
def __init__(self, path, code, zip_path):
super().__init__(path, code)
self._zip_path = zip_path
def get_last_modified(self):
try:
return os.path.getmtime(self._zip_path)
except (FileNotFoundError, PermissionError, NotADirectoryError):
return None
| ZipFileIO |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/errors.py | {
"start": 9656,
"end": 9872
} | class ____(Exception):
"""Signal that the type inference should be rewound due to recursive types. Internal use only."""
def __init__(self, target: object) -> None:
self.target = target
| RewindRecursive |
python | ray-project__ray | rllib/examples/rl_modules/classes/vpg_using_shared_encoder_rlm.py | {
"start": 457,
"end": 1618
} | class ____(TorchRLModule):
"""A VPG (vanilla pol. gradient)-style RLModule using a shared encoder.
# __sphinx_doc_policy_end__
The shared encoder RLModule must be held by the same MultiRLModule, under which
this RLModule resides. The shared encoder's forward is called before this
RLModule's forward and returns the embeddings under the "encoder_embeddings"
key.
# __sphinx_doc_policy_2_begin__
"""
def setup(self):
super().setup()
# Incoming feature dim from the shared encoder.
embedding_dim = self.model_config["embedding_dim"]
hidden_dim = self.model_config["hidden_dim"]
self._pi_head = torch.nn.Sequential(
torch.nn.Linear(embedding_dim, hidden_dim),
torch.nn.ReLU(),
torch.nn.Linear(hidden_dim, self.action_space.n),
)
def _forward(self, batch, **kwargs):
embeddings = batch[ENCODER_OUT] # Get the output of the encoder
logits = self._pi_head(embeddings)
return {Columns.ACTION_DIST_INPUTS: logits}
# __sphinx_doc_policy_2_end__
# __sphinx_doc_mrlm_begin__
| VPGPolicyAfterSharedEncoder |
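A framework-free sketch of the wiring the docstring describes: a shared encoder produces embeddings and the policy head consumes them. Dimensions are toy values, and none of the RLlib plumbing (`MultiRLModule`, `Columns`, `ENCODER_OUT`) appears here:

```python
import torch

obs_dim, embedding_dim, hidden_dim, num_actions = 8, 16, 32, 4

shared_encoder = torch.nn.Sequential(
    torch.nn.Linear(obs_dim, embedding_dim),
    torch.nn.ReLU(),
)
pi_head = torch.nn.Sequential(  # mirrors self._pi_head above
    torch.nn.Linear(embedding_dim, hidden_dim),
    torch.nn.ReLU(),
    torch.nn.Linear(hidden_dim, num_actions),
)

obs = torch.randn(2, obs_dim)
embeddings = shared_encoder(obs)  # what the MultiRLModule would hand over
logits = pi_head(embeddings)      # action-distribution inputs, shape (2, 4)
```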
python | pytorch__pytorch | torch/_dynamo/variables/distributed.py | {
"start": 15006,
"end": 18780
} | class ____(VariableTracker):
"""
Handles torch.utils.hooks.BackwardHook for module-level backward
hooks.
"""
@staticmethod
def create(
tx: "InstructionTranslator",
module: VariableTracker,
user_hooks: VariableTracker,
user_pre_hooks: VariableTracker,
) -> "BackwardHookVariable":
if not compiled_autograd.compiled_autograd_enabled:
unimplemented(
gb_type="Module-level backwards hooks require compiled autograd.",
context="",
explanation="",
hints=[
"Enable compiled autograd by setting torch._dynamo.config.compiled_autograd = True."
],
)
def _in_graph_bw_hooks(
bw_state: BackwardState,
) -> torch.utils.hooks.BackwardHook:
"""
Rather than installing the user hooks in the graph (which
don't survive AotAutograd), we install hooks that will call
trace_wrapped in the backward pass that CompiledAutograd
can turn into actual hook calls.
"""
return torch.utils.hooks.BackwardHook(
None,
(
functools.partial(
trace_wrapped,
fn=call_module_hooks_from_backward_state,
bw_state=bw_state,
hooks_name=user_hooks_name,
module_name=module_name,
),
),
(
functools.partial(
trace_wrapped,
fn=call_module_hooks_from_backward_state,
bw_state=bw_state,
hooks_name=user_pre_hooks_name,
module_name=module_name,
),
),
)
module_name, bw_state_proxy = tx.output.add_backward_state_hook(module, "mod")
user_pre_hooks_name, _ = tx.output.add_backward_state_hook(user_pre_hooks)
user_hooks_name, _ = tx.output.add_backward_state_hook(user_hooks)
proxy = tx.output.create_proxy(
"call_function",
_in_graph_bw_hooks,
(bw_state_proxy,),
{},
)
proxy.node.meta["example_value"] = torch.utils.hooks.BackwardHook(None, (), ())
return BackwardHookVariable(proxy, module, user_hooks, user_pre_hooks)
def __init__(
self,
proxy: torch.fx.Proxy,
module: VariableTracker,
user_hooks: VariableTracker,
user_pre_hooks: VariableTracker,
**options: Any,
) -> None:
super().__init__(**options)
self.proxy = proxy
self.module = module
self.user_hooks = user_hooks
self.user_pre_hooks = user_pre_hooks
def as_proxy(self) -> torch.fx.Proxy:
return self.proxy
def call_method(
self,
tx: "InstructionTranslator",
name: str,
args: list[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
if name in ("setup_input_hook", "setup_output_hook"):
return self._setup_hook(tx, name, *args, **kwargs)
return super().call_method(tx, name, args, kwargs)
def _setup_hook(
self, tx: "InstructionTranslator", hook_method_name: str, args: VariableTracker
) -> VariableTracker:
from .builder import wrap_fx_proxy
return wrap_fx_proxy(
tx,
tx.output.create_proxy(
"call_method",
hook_method_name,
(self.as_proxy(), args.as_proxy()),
{},
),
)
| BackwardHookVariable |
python | Netflix__metaflow | test/core/tests/card_refresh_test.py | {
"start": 72,
"end": 7599
} | class ____(MetaflowTest):
"""
    This test does a few checks to verify that the core user interfaces are working:
1. It validates we can call `current.card.refresh` without any errors.
2. It validates if the data updates that are getting shipped are correct.
    How it does this:
1. In step code:
1. We create a random array of strings.
2. We call `current.card.refresh` with the array.
3. We check if the card is present given that we have called referesh and the card
should have reached the backend in some short period of time
4. Keep adding new data to the array and keep calling refresh.
        5. The data update that got shipped should *at least* be a subset of the actual data present in the runtime code.
2. In check_results:
1. We check if the data that got shipped can be access post task completion
2. We check if the data that got shipped is a subset of the actual data created during the runtime code.
"""
PRIORITY = 3
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@tag('environment(vars={"METAFLOW_CARD_NO_WARNING": "True"})')
@tag('card(type="test_refresh_card", id="refresh_card")')
@steps(
0,
[
"singleton-start",
"sigleton-end",
"singleton",
"foreach-split-small",
"foreach-inner-small",
"foreach-join-small",
"split-and",
"single-branch-split",
"join-and",
"parallel-step",
],
)
def step_start(self):
import random
import string
def _create_random_strings(char_len):
return "".join(random.choice(string.ascii_letters) for i in range(char_len))
def _array_is_a_subset(arr1, arr2):
return set(arr1).issubset(set(arr2))
from metaflow import current
from metaflow.cards import get_cards
from metaflow.plugins.cards.card_client import Card
import random
import time
start_arr = [_create_random_strings(10) for i in range(5)]
# Calling the first refresh should trigger a render of the card.
current.card.refresh({"arr": start_arr})
# sleep for a little bit because the card refresh is async.
# This feels a little hacky but need better ideas on how to test this
# when async processes may write cards/data in a "best-effort" manner.
# The `try_to_get_card` function will keep retrying to get a card until a
# timeout value is reached. After which the function will throw a `TimeoutError`.
card = try_to_get_card(id="refresh_card")
assert_equals(isinstance(card, Card), True)
        sleep_between_refreshes = 4  # Set based on RUNTIME_CARD_MIN_REFRESH_INTERVAL, which rate-limits how often the card is refreshed.
        # Now we check that the refresh interface ships data updates as expected.
card_data = None
for i in range(5):
# We need to put sleep statements because:
# 1. the update to cards is run via async processes
# 2. card refreshes are rate-limited by RUNTIME_CARD_MIN_REFRESH_INTERVAL so we can't validate with each update.
# There by there is no consistent way to know from during user-code when a data update
# actually got shiped.
start_arr.append(_create_random_strings(10))
current.card.refresh({"arr": start_arr})
# We call the `card.get_data` interface to validate the data is available in the card.
# This is a private interface and should not be used by users but is used by internal services.
card_data = card.get_data()
if card_data is not None:
                # Assert that the data is at least a subset of what we sent to the datastore.
assert_equals(
_array_is_a_subset(card_data["data"]["user"]["arr"], start_arr),
True,
)
# The `TestRefreshCard.refresh(task, data)` method returns the `data` object as a pass through.
                # This test will also serve the purpose of ensuring that any changes to these keys are
                # caught by the test framework. The minimum subset should be present and grown as
                # needs require.
                # We first check the keys of the refresh JSON created in `card_cli.py`.
top_level_keys = set(["data", "reload_token"])
assert_equals(top_level_keys.issubset(set(card_data.keys())), True)
# We then check the keys returned from the `current.card._get_latest_data` which is the
# `data` parameter in the `MetaflowCard.refresh ` method.
required_data_keys = set(
["mode", "component_update_ts", "components", "render_seq", "user"]
)
assert_equals(
required_data_keys.issubset(set(card_data["data"].keys())), True
)
time.sleep(sleep_between_refreshes)
assert_equals(card_data is not None, True)
self.final_data = {"arr": start_arr}
# setting step name here helps us figure out what steps should be validated by the checker
self.step_name = current.step_name
@steps(1, ["all"])
def step_all(self):
pass
def check_results(self, flow, checker):
def _array_is_a_subset(arr1, arr2):
return set(arr1).issubset(set(arr2))
if checker.__class__.__name__ != "MetadataCheck":
return
run = checker.get_run()
for step in flow:
meta_check_dict = checker.artifact_dict_if_exists(step.name, "final_data")
            # Whichever steps ran the actual card-testing code
            # contain the `final_data` attribute and the `step_name` attribute.
            # If these exist, then we can successfully validate the card data since it is meant to exist.
step_done_check_dict = checker.artifact_dict_if_exists(
step.name, "step_name"
)
for task_id in step_done_check_dict:
if (
len(step_done_check_dict[task_id]) == 0
or step_done_check_dict[task_id]["step_name"] != step.name
):
print(
"Skipping task pathspec %s" % run[step.name][task_id].pathspec
)
continue
# If the `step_name` attribute was set then surely `final_data` will also be set;
data_obj = meta_check_dict[task_id]["final_data"]
card_present, card_data = checker.get_card_data(
step.name, task_id, "test_refresh_card", card_id="refresh_card"
)
assert_equals(card_present, True)
data_has_latest_artifact = _array_is_a_subset(
data_obj["arr"], card_data["data"]["user"]["arr"]
)
assert_equals(data_has_latest_artifact, True)
print(
"Succesfully validated task pathspec %s"
% run[step.name][task_id].pathspec
)
| CardWithRefreshTest |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py | {
"start": 22800,
"end": 23285
} | class ____(graphene.Interface):
runId = graphene.NonNull(graphene.String)
stepKey = graphene.NonNull(graphene.String)
status = graphene.Field(GrapheneStepEventStatus)
startTime = graphene.Field(graphene.Float)
endTime = graphene.Field(graphene.Float)
materializations = non_null_list(GrapheneMaterializationEvent)
expectationResults = non_null_list(GrapheneExpectationResult)
class Meta:
name = "PipelineRunStepStats"
| GraphenePipelineRunStepStats |
python | gevent__gevent | src/greentest/3.14/test_httpservers.py | {
"start": 15093,
"end": 16228
} | class ____(BaseTestCase):
class request_handler(BaseHTTPRequestHandler):
protocol_version = 'HTTP/1.1'
default_request_version = 'HTTP/1.1'
def do_GET(self):
self.send_response(HTTPStatus.OK)
self.end_headers()
def do_ERROR(self):
self.send_error(HTTPStatus.NOT_FOUND, 'File not found')
def test_get(self):
self.con = http.client.HTTPConnection(self.HOST, self.PORT)
self.con.connect()
with support.captured_stderr() as err:
self.con.request('GET', '/')
self.con.getresponse()
self.assertEndsWith(err.getvalue(), '"GET / HTTP/1.1" 200 -\n')
def test_err(self):
self.con = http.client.HTTPConnection(self.HOST, self.PORT)
self.con.connect()
with support.captured_stderr() as err:
self.con.request('ERROR', '/')
self.con.getresponse()
lines = err.getvalue().split('\n')
self.assertEndsWith(lines[0], 'code 404, message File not found')
self.assertEndsWith(lines[1], '"ERROR / HTTP/1.1" 404 -')
| RequestHandlerLoggingTestCase |
python | run-llama__llama_index | llama-index-integrations/program/llama-index-program-evaporate/llama_index/program/evaporate/df.py | {
"start": 4755,
"end": 7670
} | class ____(BasePydanticProgram[DataFrameRowsOnly]):
"""
DF Rows output parser.
Given DF schema, extract text into a set of rows.
"""
def __init__(
self,
pydantic_program_cls: Type[BaseLLMFunctionProgram],
df_parser_template_str: str = DEFAULT_ROWS_DF_PARSER_TMPL,
column_schema: Optional[str] = None,
input_key: str = "input_str",
**program_kwargs: Any,
) -> None:
"""Init params."""
# partial format df parser template string with column schema
prompt_template_str = df_parser_template_str.replace(
"{column_schema}", column_schema or ""
)
pydantic_program = pydantic_program_cls.from_defaults(
DataFrameRowsOnly, prompt_template_str, **program_kwargs
)
self._validate_program(pydantic_program)
self._pydantic_program = pydantic_program
self._input_key = input_key
def _validate_program(self, pydantic_program: BasePydanticProgram) -> None:
if pydantic_program.output_cls != DataFrameRowsOnly:
raise ValueError(
"Output class of pydantic program must be `DataFramRowsOnly`."
)
@classmethod
def from_defaults(
cls,
pydantic_program_cls: Optional[Type[BaseLLMFunctionProgram]] = None,
df_parser_template_str: str = DEFAULT_ROWS_DF_PARSER_TMPL,
df: Optional[pd.DataFrame] = None,
column_schema: Optional[str] = None,
input_key: str = "input_str",
**kwargs: Any,
) -> "DFRowsProgram":
"""Rows DF output parser."""
pydantic_program_cls = pydantic_program_cls or FunctionCallingProgram
# either one of df or column_schema needs to be specified
if df is None and column_schema is None:
raise ValueError(
"Either `df` or `column_schema` must be specified for "
"DFRowsOutputParser."
)
# first, inject the column schema into the template string
if column_schema is None:
assert df is not None
# by default, show column schema and some example values
column_schema = ", ".join(df.columns)
return cls(
pydantic_program_cls,
df_parser_template_str=df_parser_template_str,
column_schema=column_schema,
input_key=input_key,
**kwargs,
)
@property
def output_cls(self) -> Type[DataFrameRowsOnly]:
"""Output class."""
return DataFrameRowsOnly
def __call__(self, *args: Any, **kwds: Any) -> DataFrameRowsOnly:
"""Call."""
if self._input_key not in kwds:
raise ValueError(f"Input key {self._input_key} not found in kwds.")
result = self._pydantic_program(**{self._input_key: kwds[self._input_key]})
return cast(DataFrameRowsOnly, result)
| DFRowsProgram |
python | sympy__sympy | doc/ext/docscrape.py | {
"start": 161,
"end": 1986
} | class ____:
"""
A line-based string reader.
"""
def __init__(self, data):
"""
Parameters
----------
data : str
String with lines separated by '\n'.
"""
if isinstance(data, list):
self._str = data
else:
self._str = data.split('\n') # store string as list of lines
self.reset()
def __getitem__(self, n):
return self._str[n]
def reset(self):
self._l = 0 # current line nr
def read(self):
if not self.eof():
out = self[self._l]
self._l += 1
return out
else:
return ''
def seek_next_non_empty_line(self):
for l in self[self._l:]:
if l.strip():
break
else:
self._l += 1
def eof(self):
return self._l >= len(self._str)
def read_to_condition(self, condition_func):
start = self._l
for line in self[start:]:
if condition_func(line):
return self[start:self._l]
self._l += 1
if self.eof():
return self[start:self._l + 1]
return []
def read_to_next_empty_line(self):
self.seek_next_non_empty_line()
def is_empty(line):
return not line.strip()
return self.read_to_condition(is_empty)
def read_to_next_unindented_line(self):
def is_unindented(line):
return (line.strip() and (len(line.lstrip()) == len(line)))
return self.read_to_condition(is_unindented)
def peek(self, n=0):
if self._l + n < len(self._str):
return self[self._l + n]
else:
return ''
def is_empty(self):
return not ''.join(self._str).strip()
| Reader |
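Usage sketch for the `Reader` above, assuming the class is in scope, walking a docstring-like block line by line:

```python
r = Reader("Parameters\n----------\ndata : str\n    The payload.\n\nNotes")
print(r.read_to_next_empty_line())
# ['Parameters', '----------', 'data : str', '    The payload.']
print(repr(r.peek()))  # '' -- the cursor now sits on the blank separator line
```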
python | kamyu104__LeetCode-Solutions | Python/sequentially-ordinal-rank-tracker.py | {
"start": 101,
"end": 505
} | class ____(object):
def __init__(self):
self.__sl = SortedList()
self.__i = 0
def add(self, name, score):
"""
:type name: str
:type score: int
:rtype: None
"""
self.__sl.add((-score, name))
def get(self):
"""
:rtype: str
"""
self.__i += 1
return self.__sl[self.__i-1][1]
| SORTracker |
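Usage sketch for the `SORTracker` above (LeetCode 2102 semantics; assumes `SortedList` from `sortedcontainers` is imported as in the original file): storing `(-score, name)` means the i-th element of the sorted list is the i-th best location, with score ties broken by the lexicographically smaller name:

```python
t = SORTracker()
t.add("bradford", 2)
t.add("branford", 3)
print(t.get())  # branford -- best so far (highest score)
t.add("alps", 2)
print(t.get())  # alps -- 2nd best: ties on score go to the smaller name
```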
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 22978,
"end": 26089
} | class ____(BaseModel):
type: Literal["MinMaxDatetime"]
datetime: str = Field(
...,
description="Datetime value.",
examples=["2021-01-01", "2021-01-01T00:00:00Z", "{{ config['start_time'] }}"],
title="Datetime",
)
datetime_format: Optional[str] = Field(
"",
description='Format of the datetime value. Defaults to "%Y-%m-%dT%H:%M:%S.%f%z" if left empty. Use placeholders starting with "%" to describe the format the API is using. The following placeholders are available:\n * **%s**: Epoch unix timestamp - `1686218963`\n * **%s_as_float**: Epoch unix timestamp in seconds as float with microsecond precision - `1686218963.123456`\n * **%ms**: Epoch unix timestamp - `1686218963123`\n * **%a**: Weekday (abbreviated) - `Sun`\n * **%A**: Weekday (full) - `Sunday`\n * **%w**: Weekday (decimal) - `0` (Sunday), `6` (Saturday)\n * **%d**: Day of the month (zero-padded) - `01`, `02`, ..., `31`\n * **%b**: Month (abbreviated) - `Jan`\n * **%B**: Month (full) - `January`\n * **%m**: Month (zero-padded) - `01`, `02`, ..., `12`\n * **%y**: Year (without century, zero-padded) - `00`, `01`, ..., `99`\n * **%Y**: Year (with century) - `0001`, `0002`, ..., `9999`\n * **%H**: Hour (24-hour, zero-padded) - `00`, `01`, ..., `23`\n * **%I**: Hour (12-hour, zero-padded) - `01`, `02`, ..., `12`\n * **%p**: AM/PM indicator\n * **%M**: Minute (zero-padded) - `00`, `01`, ..., `59`\n * **%S**: Second (zero-padded) - `00`, `01`, ..., `59`\n * **%f**: Microsecond (zero-padded to 6 digits) - `000000`, `000001`, ..., `999999`\n * **%z**: UTC offset - `(empty)`, `+0000`, `-04:00`\n * **%Z**: Time zone name - `(empty)`, `UTC`, `GMT`\n * **%j**: Day of the year (zero-padded) - `001`, `002`, ..., `366`\n * **%U**: Week number of the year (Sunday as first day) - `00`, `01`, ..., `53`\n * **%W**: Week number of the year (Monday as first day) - `00`, `01`, ..., `53`\n * **%c**: Date and time representation - `Tue Aug 16 21:30:00 1988`\n * **%x**: Date representation - `08/16/1988`\n * **%X**: Time representation - `21:30:00`\n * **%%**: Literal \'%\' character\n\n Some placeholders depend on the locale of the underlying system - in most cases this locale is configured as en/US. For more information see the [Python documentation](https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes).\n',
examples=["%Y-%m-%dT%H:%M:%S.%f%z", "%Y-%m-%d", "%s"],
title="Datetime Format",
)
max_datetime: Optional[str] = Field(
None,
description="Ceiling applied on the datetime value. Must be formatted with the datetime_format field.",
examples=["2021-01-01T00:00:00Z", "2021-01-01"],
title="Max Datetime",
)
min_datetime: Optional[str] = Field(
None,
description="Floor applied on the datetime value. Must be formatted with the datetime_format field.",
examples=["2010-01-01T00:00:00Z", "2010-01-01"],
title="Min Datetime",
)
parameters: Optional[Dict[str, Any]] = Field(None, alias="$parameters")
| MinMaxDatetime |
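A minimal instantiation sketch for the pydantic model above (field values are illustrative):
boundary = MinMaxDatetime(
    type="MinMaxDatetime",
    datetime="{{ config['start_time'] }}",
    datetime_format="%Y-%m-%dT%H:%M:%S.%f%z",
    min_datetime="2010-01-01T00:00:00Z",
)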
python | getsentry__sentry | src/sentry/models/apigrant.py | {
"start": 925,
"end": 3820
} | class ____(Model):
"""
A grant represents a token with a short lifetime that can
be swapped for an access token, as described in :rfc:`4.1.2`
of the OAuth 2 spec.
"""
__relocation_scope__ = RelocationScope.Global
user = FlexibleForeignKey("sentry.User")
application = FlexibleForeignKey("sentry.ApiApplication")
code = models.CharField(max_length=64, db_index=True, default=generate_code)
expires_at = models.DateTimeField(db_index=True, default=default_expiration)
redirect_uri = models.CharField(max_length=255)
scopes = typed_dict_bitfield(
TypedDict( # type: ignore[operator]
"scopes",
{
"project:read": bool,
"project:write": bool,
"project:admin": bool,
"project:releases": bool,
"team:read": bool,
"team:write": bool,
"team:admin": bool,
"event:read": bool,
"event:write": bool,
"event:admin": bool,
"org:read": bool,
"org:write": bool,
"org:admin": bool,
"member:read": bool,
"member:write": bool,
"member:admin": bool,
"openid": bool,
"profile": bool,
"email": bool,
},
)
)
scope_list = ArrayField(models.TextField(), default=list)
# API applications should ideally get access to only one of a user's organizations.
# If null, the grant is about user-level access, not org-level access.
organization_id = HybridCloudForeignKey(
"sentry.Organization",
db_index=True,
null=True,
on_delete="CASCADE",
)
class Meta:
app_label = "sentry"
db_table = "sentry_apigrant"
def __str__(self) -> str:
return (
f"api_grant_id={self.id}, user_id={self.user.id}, application_id={self.application.id}"
)
def get_scopes(self):
if self.scope_list:
return self.scope_list
return [k for k, v in self.scopes.items() if v]
def has_scope(self, scope):
return scope in self.get_scopes()
def is_expired(self):
return timezone.now() >= self.expires_at
def redirect_uri_allowed(self, uri):
return uri == self.redirect_uri
@classmethod
def get_lock_key(cls, grant_id) -> str:
return f"api_grant:{grant_id}"
@classmethod
def sanitize_relocation_json(
cls, json: Any, sanitizer: Sanitizer, model_name: NormalizedModelName | None = None
) -> None:
model_name = get_model_name(cls) if model_name is None else model_name
super().sanitize_relocation_json(json, sanitizer, model_name)
sanitizer.set_string(json, SanitizableField(model_name, "code"), lambda _: generate_code())
| ApiGrant |
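A hedged sketch of how the grant might be validated during the token exchange. It assumes a configured Django environment; the surrounding variables (`code`, `application`, `redirect_uri`) and the error handling are hypothetical, and only methods defined on the model above are used.
grant = ApiGrant.objects.get(code=code, application=application)  # placeholders
if grant.is_expired():
    raise ValueError("grant expired")
if not grant.redirect_uri_allowed(redirect_uri):
    raise ValueError("redirect_uri mismatch")
scopes = grant.get_scopes()  # prefers scope_list, falls back to the bitfield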
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-milvus/destination_milvus/indexer.py | {
"start": 749,
"end": 6674
} | class ____(Indexer):
config: MilvusIndexingConfigModel
def __init__(self, config: MilvusIndexingConfigModel, embedder_dimensions: int):
super().__init__(config)
self.embedder_dimensions = embedder_dimensions
def _connect(self):
connections.connect(
uri=self.config.host,
db_name=self.config.db if self.config.db else "",
user=self.config.auth.username if self.config.auth.mode == "username_password" else "",
password=self.config.auth.password if self.config.auth.mode == "username_password" else "",
token=self.config.auth.token if self.config.auth.mode == "token" else "",
)
def _connect_with_timeout(self):
# Run connect in a separate process as it will hang if the token is invalid.
proc = Process(target=self._connect)
proc.start()
proc.join(5)
if proc.is_alive():
# If the process is still alive after 5 seconds, terminate it and raise an exception
proc.terminate()
proc.join()
raise Exception("Connection timed out, check your host and credentials")
def _create_index(self, collection: Collection):
"""
Create an index on the vector field when auto-creating the collection.
This uses an IVF_FLAT index with 1024 clusters. This is a good default for most use cases. If more control is needed, the index can be created manually (this is also stated in the documentation)
"""
collection.create_index(
field_name=self.config.vector_field, index_params={"metric_type": "L2", "index_type": "IVF_FLAT", "params": {"nlist": 1024}}
)
def _create_client(self):
self._connect_with_timeout()
# If the process exited within 5 seconds, it's safe to connect on the main process to execute the command
self._connect()
if not utility.has_collection(self.config.collection):
pk = FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True)
vector = FieldSchema(name=self.config.vector_field, dtype=DataType.FLOAT_VECTOR, dim=self.embedder_dimensions)
schema = CollectionSchema(fields=[pk, vector], enable_dynamic_field=True)
collection = Collection(name=self.config.collection, schema=schema)
self._create_index(collection)
self._collection = Collection(self.config.collection)
self._collection.load()
self._primary_key = self._collection.primary_field.name
def check(self) -> Optional[str]:
deployment_mode = os.environ.get("DEPLOYMENT_MODE", "")
if deployment_mode.casefold() == CLOUD_DEPLOYMENT_MODE and not self._uses_safe_config():
return "Host must start with https:// and authentication must be enabled on cloud deployment."
try:
self._create_client()
description = self._collection.describe()
if not description["auto_id"]:
return "Only collections with auto_id are supported"
vector_field = next((field for field in description["fields"] if field["name"] == self.config.vector_field), None)
if vector_field is None:
return f"Vector field {self.config.vector_field} not found"
if vector_field["type"] != DataType.FLOAT_VECTOR:
return f"Vector field {self.config.vector_field} is not a vector"
if vector_field["params"]["dim"] != self.embedder_dimensions:
return f"Vector field {self.config.vector_field} is not a {self.embedder_dimensions}-dimensional vector"
except Exception as e:
return format_exception(e)
return None
def _uses_safe_config(self) -> bool:
return self.config.host.startswith("https://") and not self.config.auth.mode == "no_auth"
def pre_sync(self, catalog: ConfiguredAirbyteCatalog) -> None:
self._create_client()
for stream in catalog.streams:
if stream.destination_sync_mode == DestinationSyncMode.overwrite:
self._delete_for_filter(f'{METADATA_STREAM_FIELD} == "{create_stream_identifier(stream.stream)}"')
def _delete_for_filter(self, expr: str) -> None:
iterator = self._collection.query_iterator(expr=expr)
page = iterator.next()
while len(page) > 0:
id_field = next(iter(page[0].keys()))
ids = [next(iter(entity.values())) for entity in page]
id_list_expr = ", ".join([str(id) for id in ids])
self._collection.delete(expr=f"{id_field} in [{id_list_expr}]")
page = iterator.next()
def _normalize(self, metadata: dict) -> dict:
result = {}
for key, value in metadata.items():
normalized_key = key
# the primary key can't be set directly with auto_id, so we prefix it with an underscore
if key == self._primary_key:
normalized_key = f"_{key}"
result[normalized_key] = value
return result
def index(self, document_chunks, namespace, stream):
entities = []
for chunk in document_chunks:
entity = {
**self._normalize(chunk.metadata),
self.config.vector_field: chunk.embedding,
self.config.text_field: chunk.page_content,
}
if chunk.page_content is not None:
entity[self.config.text_field] = chunk.page_content
entities.append(entity)
self._collection.insert(entities)
def delete(self, delete_ids, namespace, stream):
if len(delete_ids) > 0:
id_list_expr = ", ".join([f'"{id}"' for id in delete_ids])
id_expr = f"{METADATA_RECORD_ID_FIELD} in [{id_list_expr}]"
self._delete_for_filter(id_expr)
| MilvusIndexer |
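The `_normalize` rule is easiest to see standalone; a sketch assuming the collection's auto-generated primary field is named `pk`:
def normalize(metadata, primary_key="pk"):
    # Mirror of _normalize: the primary key cannot be written directly on
    # auto_id collections, so it is stored under an underscore-prefixed name.
    return {(f"_{k}" if k == primary_key else k): v for k, v in metadata.items()}

assert normalize({"pk": 1, "_ab_stream": "users"}) == {"_pk": 1, "_ab_stream": "users"}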
python | tensorflow__tensorflow | tensorflow/python/data/experimental/ops/data_service_ops.py | {
"start": 2147,
"end": 5347
} | class ____(enum.IntEnum):
"""Specifies how to shard data among tf.data service workers.
OFF: No sharding will be performed. Each worker produces the entire dataset
without any sharding. With this mode, the best practice is to shuffle the
dataset nondeterministically so that workers process the dataset in different
orders. If workers are restarted or join the cluster mid-job, they will begin
processing the dataset from the beginning.
DYNAMIC: The input dataset is dynamically split among workers at runtime. Each
worker gets the next split when it reads data from the dispatcher. Data is
produced non-deterministically in this mode. Dynamic sharding works well with
varying-sized tf.data service clusters, e.g., when you need to auto-scale your
workers. Dynamic sharding provides at-most once visitation guarantees. No
examples will be repeated, but some may be missed if a tf.data service worker
gets restarted while processing a file.
The following are static sharding policies. The semantics are similar to
`tf.data.experimental.AutoShardPolicy`. These policies require:
* The tf.data service cluster is configured with a fixed list of workers
in DispatcherConfig.
* Each client only reads from the local tf.data service worker.
If a worker is restarted while performing static sharding, the worker will
begin processing its shard again from the beginning.
FILE: Shards by input files (i.e. each worker will get a fixed set of files to
process). When this option is selected, make sure that there is at least as
many files as workers. If there are fewer input files than workers, a runtime
error will be raised.
DATA: Shards by elements produced by the dataset. Each worker will process the
whole dataset and discard the portion that is not for itself. Note that for
this mode to correctly partition the dataset elements, the dataset needs to
produce elements in a deterministic order.
FILE_OR_DATA: Attempts FILE-based sharding, falling back to DATA-based
sharding on failure.
HINT: Looks for the presence of `shard(SHARD_HINT, ...)` which is treated as a
placeholder to replace with `shard(num_workers, worker_index)`.
"""
# LINT.IfChange(tf_data_service_sharding_policy)
OFF = 0
DYNAMIC = 1
FILE = 2
DATA = 3
FILE_OR_DATA = 4
HINT = 5
# LINT.ThenChange()
def _to_proto(self) -> data_service_pb2.ProcessingModeDef.ShardingPolicy:
"""Converts the policy to ProcessingModeDef proto enum."""
if self == ShardingPolicy.OFF:
return data_service_pb2.ProcessingModeDef.OFF
if self == ShardingPolicy.DYNAMIC:
return data_service_pb2.ProcessingModeDef.DYNAMIC
if self == ShardingPolicy.FILE:
return data_service_pb2.ProcessingModeDef.FILE
if self == ShardingPolicy.DATA:
return data_service_pb2.ProcessingModeDef.DATA
if self == ShardingPolicy.FILE_OR_DATA:
return data_service_pb2.ProcessingModeDef.FILE_OR_DATA
if self == ShardingPolicy.HINT:
return data_service_pb2.ProcessingModeDef.HINT
raise ValueError(f"Unable to convert sharding policy {self!r} to proto.")
@tf_export("data.experimental.service.CrossTrainerCache")
| ShardingPolicy |
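A hedged sketch of selecting a policy when distributing a dataset through the tf.data service; the dispatcher address is a placeholder.
import tensorflow as tf

dataset = tf.data.Dataset.range(1024)
dataset = dataset.apply(
    tf.data.experimental.service.distribute(
        processing_mode=tf.data.experimental.service.ShardingPolicy.DYNAMIC,
        service="grpc://dispatcher-address:5000",  # placeholder address
    )
)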
python | readthedocs__readthedocs.org | readthedocs/api/v2/views/model_views.py | {
"start": 9242,
"end": 15878
} | class ____(DisableListEndpoint, UpdateModelMixin, UserSelectViewSet):
permission_classes = [HasBuildAPIKey | ReadOnlyPermission]
renderer_classes = (JSONRenderer, PlainTextBuildRenderer)
model = Build
filterset_fields = ("project__slug", "commit")
def get_serializer_class(self):
"""
Return the proper serializer for UI and Admin.
This ViewSet has a slightly different pattern since we want to
pre-process the `command` field before returning it to the user, and we
also want to have a specific serializer for admins.
"""
if self.request.build_api_key:
# Logic copied from `UserSelectViewSet.get_serializer_class`
# and extended to choose serializer from self.action
if self.action not in ["list", "retrieve"]:
return BuildAdminSerializer # Staff write-only
return BuildAdminReadOnlySerializer # Staff read-only
return BuildSerializer # Non-staff
@decorators.action(
detail=False,
permission_classes=[HasBuildAPIKey],
methods=["get"],
)
def concurrent(self, request, **kwargs):
project_slug = request.GET.get("project__slug")
build_api_key = request.build_api_key
if project_slug != build_api_key.project.slug:
log.warning(
"Project slug doesn't match the one attached to the API key.",
api_key_id=build_api_key.id,
project_slug=project_slug,
)
raise Http404()
project = build_api_key.project
limit_reached, concurrent, max_concurrent = Build.objects.concurrent(project)
data = {
"limit_reached": limit_reached,
"concurrent": concurrent,
"max_concurrent": max_concurrent,
}
return Response(data)
@decorators.action(
detail=True,
# We make this endpoint public because we don't want to expose the build API key inside the user's container.
# To emulate "auth" we check for the builder hostname to match the `Build.builder` defined in the database.
permission_classes=[],
# We can't use the default `get_queryset()` method because it's filtered by build API key and/or user access.
# Since we don't want to check for permissions here we need to use a custom queryset here.
get_queryset=lambda: Build.objects.all(),
methods=["post"],
)
def healthcheck(self, request, **kwargs):
build = self.get_object()
builder_hostname = request.GET.get("builder")
structlog.contextvars.bind_contextvars(
build_id=build.pk,
project_slug=build.project.slug,
builder_hostname=builder_hostname,
)
log.info("Healthcheck received.")
if build.state in BUILD_FINAL_STATES or build.builder != builder_hostname:
log.warning(
"Build is not running anymore or builder hostname doesn't match.",
)
raise Http404()
build.healthcheck = timezone.now()
build.save()
return Response(status=status.HTTP_204_NO_CONTENT)
def retrieve(self, *args, **kwargs):
"""
Retrieves command data from storage.
This uses files from storage to get the JSON,
and replaces the ``commands`` part of the response data.
"""
if not settings.RTD_SAVE_BUILD_COMMANDS_TO_STORAGE:
return super().retrieve(*args, **kwargs)
instance = self.get_object()
serializer = self.get_serializer(instance)
data = serializer.data
if instance.cold_storage:
storage_path = "{date}/{id}.json".format(
date=str(instance.date.date()),
id=instance.id,
)
if build_commands_storage.exists(storage_path):
try:
json_resp = build_commands_storage.open(storage_path).read()
data["commands"] = json.loads(json_resp)
# Normalize commands in the same way than when returning
# them using the serializer
for buildcommand in data["commands"]:
buildcommand["command"] = normalize_build_command(
buildcommand["command"],
instance.project.slug,
instance.get_version_slug(),
)
except Exception:
log.exception(
"Failed to read build data from storage.",
path=storage_path,
)
return Response(data)
@decorators.action(
detail=True,
permission_classes=[HasBuildAPIKey],
methods=["post"],
)
def reset(self, request, **kwargs):
"""Reset the build so it can be re-used when re-trying."""
instance = self.get_object()
instance.reset()
return Response(status=status.HTTP_204_NO_CONTENT)
def get_queryset_for_api_key(self, api_key):
return self.model.objects.filter(project=api_key.project)
@decorators.action(
detail=True,
permission_classes=[HasBuildAPIKey],
methods=["post"],
url_path="credentials/storage",
)
def credentials_for_storage(self, request, **kwargs):
"""
Generate temporary credentials for interacting with storage.
This can generate temporary credentials for interacting with S3 only for now.
"""
build = self.get_object()
credentials_type = request.data.get("type")
if credentials_type == "build_media":
method = get_s3_build_media_scoped_credentials
# 30 minutes should be enough for uploading build artifacts.
duration = 30 * 60
elif credentials_type == "build_tools":
method = get_s3_build_tools_scoped_credentials
# 30 minutes should be enough for downloading build tools.
duration = 30 * 60
else:
return Response(
{"error": "Invalid storage type"},
status=status.HTTP_400_BAD_REQUEST,
)
try:
credentials = method(build=build, duration=duration)
except AWSTemporaryCredentialsError:
return Response(
{"error": "Failed to generate temporary credentials"},
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
)
return Response({"s3": asdict(credentials)})
| BuildViewSet |
python | walkccc__LeetCode | solutions/1031. Maximum Sum of Two Non-Overlapping Subarrays/1031.py | {
"start": 0,
"end": 795
} | class ____:
def maxSumTwoNoOverlap(
self,
nums: list[int],
firstLen: int,
secondLen: int,
) -> int:
def helper(l: int, r: int) -> int:
n = len(nums)
left = [0] * n
summ = 0
for i in range(n):
summ += nums[i]
if i >= l:
summ -= nums[i - l]
if i >= l - 1:
left[i] = max(left[i - 1], summ) if i > 0 else summ
right = [0] * n
summ = 0
for i in reversed(range(n)):
summ += nums[i]
if i <= n - r - 1:
summ -= nums[i + r]
if i <= n - r:
right[i] = max(right[i + 1], summ) if i < n - 1 else summ
return max(left[i] + right[i + 1] for i in range(n - 1))
return max(helper(firstLen, secondLen), helper(secondLen, firstLen))
| Solution |
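A quick check against the problem's classic example, where the optimal non-overlapping subarrays are [9] (length 1) and [6, 5] (length 2):
assert Solution().maxSumTwoNoOverlap([0, 6, 5, 2, 2, 5, 1, 9, 4], 1, 2) == 20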
python | walkccc__LeetCode | solutions/3027. Find the Number of Ways to Place People II/3027.py | {
"start": 0,
"end": 639
} | class ____:
# Same as 3025. Find the Number of Ways to Place People I
def numberOfPairs(self, points: list[list[int]]) -> int:
ans = 0
points.sort(key=lambda x: (x[0], -x[1]))
for i, (_, yi) in enumerate(points):
maxY = -math.inf
for j in range(i + 1, len(points)):
_, yj = points[j]
# Chisato is in the upper-left corner at (xi, yi), and Takina is in the
# lower-right corner at (xj, yj). Also, if yj > maxY, it means that
# nobody other than Chisato and Takina is inside or on the fence.
if yi >= yj > maxY:
ans += 1
maxY = yj
return ans
| Solution |
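The snippet relies on `math.inf`, but its `import math` was cut by the extraction; with that restored, a check against a known example:
import math  # required by numberOfPairs for math.inf; elided above

assert Solution().numberOfPairs([[6, 2], [4, 4], [2, 6]]) == 2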
python | apache__airflow | task-sdk/src/airflow/sdk/types.py | {
"start": 2099,
"end": 4183
} | class ____(Protocol):
"""Minimal interface for a task instance available during the execution."""
id: uuid.UUID
dag_version_id: uuid.UUID
task: BaseOperator
task_id: str
dag_id: str
run_id: str
try_number: int
map_index: int | None
max_tries: int
hostname: str | None = None
start_date: AwareDatetime
end_date: AwareDatetime | None = None
state: TaskInstanceState | None = None
def xcom_pull(
self,
task_ids: str | list[str] | None = None,
dag_id: str | None = None,
key: str = BaseXCom.XCOM_RETURN_KEY,
include_prior_dates: bool = False,
*,
map_indexes: int | Iterable[int] | None | ArgNotSet = NOTSET,
default: Any = None,
run_id: str | None = None,
) -> Any: ...
def xcom_push(self, key: str, value: Any) -> None: ...
def get_template_context(self) -> Context: ...
def get_first_reschedule_date(self, first_try_number) -> AwareDatetime | None: ...
def get_previous_dagrun(self, state: str | None = None) -> DagRunProtocol | None: ...
@staticmethod
def get_ti_count(
dag_id: str,
map_index: int | None = None,
task_ids: list[str] | None = None,
task_group_id: str | None = None,
logical_dates: list[AwareDatetime] | None = None,
run_ids: list[str] | None = None,
states: list[str] | None = None,
) -> int: ...
@staticmethod
def get_task_states(
dag_id: str,
map_index: int | None = None,
task_ids: list[str] | None = None,
task_group_id: str | None = None,
logical_dates: list[AwareDatetime] | None = None,
run_ids: list[str] | None = None,
) -> dict[str, Any]: ...
@staticmethod
def get_dr_count(
dag_id: str,
logical_dates: list[AwareDatetime] | None = None,
run_ids: list[str] | None = None,
states: list[str] | None = None,
) -> int: ...
@staticmethod
def get_dagrun_state(dag_id: str, run_id: str) -> str: ...
| RuntimeTaskInstanceProtocol |
python | django__django | django/db/models/functions/math.py | {
"start": 5951,
"end": 6048
} | class ____(NumericOutputFieldMixin, Transform):
function = "SQRT"
lookup_name = "sqrt"
| Sqrt |
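A usage sketch in the ORM; the model and field names are placeholders. Because the class is a Transform with `lookup_name = "sqrt"`, it can also be registered on a field type and used inside filters.
from django.db.models import FloatField
from django.db.models.functions import Sqrt

# As an annotation on a hypothetical model:
Plot.objects.annotate(side=Sqrt("area"))

# Or, once registered as a transform, inside lookups:
FloatField.register_lookup(Sqrt)
Plot.objects.filter(area__sqrt__gt=10)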
python | tensorflow__tensorflow | tensorflow/python/checkpoint/async_checkpoint_helper.py | {
"start": 5834,
"end": 26199
} | class ____:
"""Helper class for async checkpoint."""
def __init__(self, checkpointer_impl, root=None, **kwargs):
"""Initialize AsyncCheckpoint.
Args:
checkpointer_impl: The Checkpoint class to power the AsyncCheckpoint.
root: The root object to checkpoint. `root` may be a trackable object or
`WeakRef` of a trackable object.
**kwargs: The keyword arguments representing the checkpointed variables.
Raises:
AttributeError: when checkpointer_impl is None.
"""
# TODO(chienchunh): Make sure the processing for the root object is
# consistent when integrating with the public API, e.g., adding all kwarg
# items as the child of the root object.
if root:
trackable_root = root() if isinstance(root, weakref.ref) else root
kwargs["root"] = trackable_root
trackable_root._maybe_initialize_trackable()
# The underlying Checkpoint instance and its items.
if checkpointer_impl is None:
raise AttributeError(
"checkpointer_impl cannot be None for AsyncCheckpointHelper."
)
self._checkpointer_impl = checkpointer_impl
self._checkpoint_items = kwargs
self._checkpoint = None
self.checkpointer()
self._checkpoint_options = None
# Indicate whether async checkpoint has finished traversing the variable
# list and created the object map between the original and copied variables.
self._initialized = False
# The list of all nodes from the original checkpoint items.
# TODO(chienchunh): Consider changing this to local variable.
self._original_nodes = None
# The mapping between the original and the copied resource variables.
# The copied variables are used for the underlying checkpointing.
self._object_map = None
# A list of TPUEmbedding objects included in the checkpoint items.
self._tpu_embedding_objects = None
# A list of highest level `Trackable`s we will copy; does not contain
# TPUEmbedding objects
self._saveable_trackables = None
self._default_device = device_util.current() or "CPU:0"
self._default_device = device_util.canonicalize(self._default_device)
self._save_file_prefix = None
self._use_checkpoint_save = False
self._async_save_thread = None
# Concurrent queue that coordinates the events for writing/reading the
# cpu-copied variables. A 'True' in the queue triggers the async thread to
# perform saving; a 'False' breaks the while loop so that the async thread
# exits; no other values will be added to the queue.
# Maxsize is set to 1 only to ensure the exit procedure. We could have used
# queue.join() in _join_async_save_thread(), but queue.join() does not have
# a timeout argument. Hence we use queue.put(timeout=300), in case the last
# checkpoint takes forever. To achieve that, maxsize needs to be 1.
self._queue = queue.Queue(maxsize=1)
# Register to join the async save thread upon exit.
atexit.register(self._join_async_save_thread)
self._async_error = None
global _END_TIME_OF_LAST_ASYNC_WRITE
with _END_TIME_OF_LAST_ASYNC_WRITE_LOCK:
if _END_TIME_OF_LAST_ASYNC_WRITE is None:
_END_TIME_OF_LAST_ASYNC_WRITE = time.time()
@def_function.function
def _copy_to_cpu(self):
"""Copy the checkpointed variables from the accelerator to the host CPU.
TODO(chienchunh): Get the concrete function before firstly called to avoid
hangining the accelerators idle during function tracing.
"""
for t in self._saveable_trackables:
try:
t._copy_trackable_to_cpu(object_map=self._object_map) # pylint: disable=protected-access
except NotImplementedError as e:
logging.warning("Trackable %s skipped due to: %s", t, e)
for tpu_embedding in self._tpu_embedding_objects:
tpu_embedding._retrieve_variables() # pylint: disable=protected-access
def checkpointer(self):
"""Gets or creates the underlying Checkpoint instance."""
if self._checkpoint is None:
self._checkpoint = self._checkpointer_impl(**self._checkpoint_items)
return self._checkpoint
def _ensure_initialized(self):
"""Initialize the async checkpoint internal state."""
# This map will be used to store the CPU copy of all checkpointable objects
self._object_map = object_identity.ObjectIdentityDictionary()
self._tpu_embedding_objects = []
# Populate self._all_tracakbles, but exclude the checkpoint instance itself
# and its save_counter, as they will be returned by `descendants()`.
exclude_set = object_identity.ObjectIdentitySet()
exclude_set.add(self.checkpointer())
exclude_set.add(self.checkpointer().save_counter)
self._saveable_trackables, all_trackables = _get_all_trackables(
root=self.checkpointer(), exclude_set=exclude_set)
# Handle special cases: TPU Embedding, and slot variables.
# 1. TPUEmbedding: Different from other trackables, TPUEmbedding needs to
# call `_retrieve_variables` to checkpoint, while populating a dummy copy to
# the object map.
# 2. Slot variables: they need to be handled differently as they cannot be
# retrieved from `TrackableView.descendants()`.
# Note: dir() is used rather than hasattr() here to avoid triggering
# custom __getattr__ code, see b/152031870 for context.
for t in all_trackables:
# Special case 1: Handle TPU Embedding by adding a dummy instance to the
# object map. Also add TPUEmbedding to a separate list for special
# handling when values are copied.
if hasattr(type(t), _TPU_EMBEDDING_ATTR):
self._handle_tpu_embedding(t)
# Special case 2: handle slot variables. The object_map is populated later
# when the variable values are being copied to host CPU for the first
# time.
if "get_slot_names" in dir(t):
slot_names = t.get_slot_names()
for slot_name in slot_names:
for original_variable in all_trackables:
if not isinstance(original_variable, variables.Variable):
continue
try:
# Usage of hasattr may result in KeyError
original_slot_variable = t.get_slot(original_variable, slot_name)
except (AttributeError, KeyError):
continue
if isinstance(original_slot_variable, base.Trackable):
self._saveable_trackables.append(original_slot_variable)
# Initiate the underlying Checkpoint instance's save_counter.
save_counter = self.checkpointer().save_counter.numpy()
logging.info("Initializing async checkpoint's save_counter: %d",
save_counter)
# Pass the object map of the copied variables to the underlying Checkpoint.
self.checkpointer()._saver._object_map = self._object_map # pylint: disable=protected-access
# We perform a `_copy_to_cpu()` to populate `self._object_map`,
# initializing copies. We do not call `self._copy_to_cpu()` directly
# because it is a tf function, which leads to access out of scope error.
# TODO(charlieruan) Figure out a better work around to solve the access
# out of scope error.
for t in self._saveable_trackables:
try:
t._copy_trackable_to_cpu(object_map=self._object_map) # pylint: disable=protected-access
except NotImplementedError as e:
logging.warning("Trackable %s skipped due to: %s", t, e)
for tpu_embedding in self._tpu_embedding_objects:
tpu_embedding._retrieve_variables() # pylint: disable=protected-access
# Initiate the async thread for checkpoint saving.
self._async_save_thread = threading.Thread(
target=self._async_save, daemon=True)
self._async_save_thread.start()
self._initialized = True
def _check_async_thread_error(self):
"""Expose the most recent error from the async saving thread to the caller.
"""
if self._async_error:
e = self._async_error
self._async_error = None
logging.error("Propagating the most recent error from the async thread "
"before joining: %s", str(e))
raise e
def _join_async_save_thread(self):
"""Join the async save thread.
The steps for terminating the async save thread:
1). Put will succeed when the last async save event is done. Putting a false
triggers the async save thread's while loop to end. We use put instead
of sync because sync does not have a timeout argument.
2). Join the async save thread. (The thread may finish before joining.)
"""
try:
self._queue.put(False, timeout=300) # Step-1.
logging.info("Joining the async save thread.")
if self._async_save_thread is not None:
self._async_save_thread.join() # Step-2.
except queue.Full:
logging.error("Timeout waiting for the async save thread; terminating the"
" thread instead. The last checkpoint may be incomeplete.")
finally:
self._check_async_thread_error()
def _async_save(self):
"""The thread function for the async checkpoint save."""
with context.executor_scope(
executor.new_executor(
enable_async=False, enable_streaming_enqueue=False)):
# The main thread inserts: a True to the queue when the user calls save,
# triggering async save; and a False when we exit the Checkpoint instance.
while self._queue.get():
logging.info("Starting async checkpoint save on the device: %s",
self._default_device)
async_save_start_time = time.time()
# Specify the ops placement on the worker if running with
# coordinator-worker mode. This is required as launching a new thread
# would clear the placement policy and make localhost the default
# placement, while the main thread's default placement would be the
# master worker's CPU:0.
try:
with ops.device(self._default_device):
with checkpoint_context.async_metrics_context():
if self._use_checkpoint_save:
self.checkpointer().save(
self._save_file_prefix, self._checkpoint_options
)
else:
self.checkpointer()._write( # pylint: disable=protected-access
self._save_file_prefix,
options=self._checkpoint_options,
)
except Exception as e: # # pylint: disable=broad-except
self._async_error = e
finally:
self._queue.task_done()
async_save_end_time = time.time()
metrics.AddAsyncCheckpointWriteDuration(
api_label=_ASYNC_CHECKPOINT,
microseconds=_get_duration_microseconds(async_save_start_time,
async_save_end_time))
# Measure the elapsed time since the last checkpoint.
# Due to the nature of async checkpoint, here it actually captures the
# duration between the start_time of the previous checkpoint and the
# start time of this checkpoint. As a result, the duration of the final
# async checkpoint is excluded, which is fine since it does not take
# much time.
global _END_TIME_OF_LAST_ASYNC_WRITE
with _END_TIME_OF_LAST_ASYNC_WRITE_LOCK:
metrics.AddTrainingTimeSaved(
api_label=_ASYNC_CHECKPOINT,
microseconds=_get_duration_microseconds(
_END_TIME_OF_LAST_ASYNC_WRITE, async_save_start_time))
_END_TIME_OF_LAST_ASYNC_WRITE = async_save_start_time
logging.info("Async save thread reached the end of the execution.")
def _handle_tpu_embedding(self, tpu_embedding):
"""Handle TPUEmbedding.
This is the only place where we populate object map in the class of
`AsyncCheckpointHelper`. For all other checkpointable trackables, we
populate object map using the trackable's own `_copy_trackable_to_cpu()`.
Args:
tpu_embedding: TPUEmbedding object to be handled.
Raises:
AttributeError: if the input trackable is not TPUEmbedding type.
"""
if not hasattr(type(tpu_embedding), _TPU_EMBEDDING_ATTR) or not callable(
tpu_embedding._create_copy_for_async_checkpoint # pylint: disable=protected-access
):
raise AttributeError(
"Expecting TPUEmbedding type; got %s" % type(tpu_embedding)
)
# Create a dummy TPUEmbedding object and add it to the object_map. This is
# to prevent the TPUEmbedding's save_callback from being triggered because
# the embedding values have already being retrieved by AsyncCheckpoint.
# pylint: disable=protected-access
new_embedding = tpu_embedding._create_copy_for_async_checkpoint(
feature_config=tpu_embedding._feature_config,
optimizer=tpu_embedding._table_config[0]
if tpu_embedding._table_config
else None,
pipeline_execution_with_tensor_core=tpu_embedding._pipeline_execution_with_tensor_core,
)
self._object_map[tpu_embedding] = new_embedding
# pylint: enable=protected-access
if tpu_embedding not in self._tpu_embedding_objects:
self._tpu_embedding_objects.append(tpu_embedding)
@property
def save_counter(self):
"""An integer variable numbering the checkpoint events.
This is maintained by the underlying tf.train.Checkpoint object employed by
AsyncCheckpoint class. The number starts at 0 and gets incremented for each
checkpoint event.
Returns:
The save counter variable.
"""
return self.checkpointer().save_counter
def write(self, save_path, options=None):
"""Save the checkpointed variables.
Args:
save_path: The file prefix of the checkpoint file.
options: Optional CheckpointOption instance.
Returns:
The full path of the checkpoint file.
"""
return self._write(save_path, options)
def _write(self, save_path, options=None):
"""Save the checkpointed variables.
This method has exactly the same logic as save(), except it does not
increment the underlying save_counter, which is done by the caller, e.g.,
CheckpointManager.
Args:
save_path: The file prefix of the checkpoint file.
options: Optional CheckpointOption instance.
Returns:
The full path of the checkpoint file.
"""
write_start_time = time.time()
if not self._initialized:
self._ensure_initialized()
else:
# First wait for async thread to finish the previous save, then copy the
# variable values to the host CPU.
self._queue.join()
self._copy_to_cpu()
# Surface the error from the async thread, if any.
# This step must come after the queue join above, so that we are
# guaranteed the previous async save has finished storing any error.
self._check_async_thread_error()
# Trigger the async thread to checkpoint the cpu-copied variables.
# Need to wait until the weight copying finishes before checkpoint save.
context.async_wait()
self._save_file_prefix = save_path
self._use_checkpoint_save = False
# Ensure that we do not request async checkpointing to the underlying
# checkpointer as this could lead to an infinite loop.
self._checkpoint_options = copy.copy(options) if options else None
if self._checkpoint_options:
self._checkpoint_options.experimental_enable_async_checkpoint = False
self._queue.put(True) # Trigger save in async thread
write_end_time = time.time()
metrics.AddCheckpointWriteDuration(
api_label=_ASYNC_CHECKPOINT,
microseconds=_get_duration_microseconds(write_start_time,
write_end_time))
return save_path
def save(self, save_path, options=None):
"""Save the checkpointed variables.
Args:
save_path: The file prefix of the checkpoint file.
options: Optional CheckpointOption instance.
Returns:
The full path of the checkpoint file.
"""
save_start_time = time.time()
# If this is the first time that AsyncCheckpoint.save() is called,
# initialize the internal states like `self._saveable_trackables`. We also
# populate `self._object_map` (i.e. initializing the cpu-copied variables
# and copy over the value for the first time) by essentially performing a
# `self._copy_to_cpu()`, hence the if-else logic here.
#
# This is not performed in the initializer because some variables, e.g.,
# slot variables of the optimizer, were not created until actually running
# the train function, so we could only get the complete list of the
# variables after some train steps were run.
if not self._initialized:
self._ensure_initialized()
else:
# First wait for async thread to finish the previous save, then copy the
# variable values to the host CPU.
self._queue.join()
self._copy_to_cpu()
# Surface the error from the async thread, if any.
# This step must come after the queue join above, so that we are
# guaranteed the previous async save has finished storing any error.
self._check_async_thread_error()
# Retrieve the save counter from the underlying checkpoint object to
# re-construct the full path of the checkpoint file.
# This step has to happen before triggering the underlying checkpoint;
# otherwise, the save_counter value may or may not have been updated.
save_counter = self.checkpointer().save_counter.numpy() + 1
full_path = "{}-{}".format(save_path, save_counter)
# Trigger the async thread to checkpoint the cpu-copied variables.
# Need to wait until the weight copying finishes before checkpoint save.
context.async_wait()
self._save_file_prefix = save_path
self._use_checkpoint_save = True
# Ensure that we do not request async checkpointing to the underlying
# checkpointer as this could lead to an infinite loop.
self._checkpoint_options = copy.copy(options) if options else None
if self._checkpoint_options:
self._checkpoint_options.experimental_enable_async_checkpoint = False
self._queue.put(True) # Trigger save in async thread
save_end_time = time.time()
metrics.AddCheckpointWriteDuration(
api_label=_ASYNC_CHECKPOINT,
microseconds=_get_duration_microseconds(save_start_time, save_end_time))
return full_path
def read(self, save_path, options=None):
"""Restore the checkpointed variables.
This method has exactly the same logic as restore(). This method is
implemented only to fulfill the duty of subclassing tf.train.Checkpoint.
Args:
save_path: The full name of the checkpoint file to be restored.
options: CheckpointOption instance.
Returns:
A load status object, which can be used to make assertions about the
status of a checkpoint restoration. See tf.train.Checkpoint.restore()
for more details.
"""
return self.restore(save_path, options)
def restore(self, save_path, options=None):
"""Restore the checkpointed variables.
Args:
save_path: The full name of the checkpoint file to be restored.
options: CheckpointOption instance.
Returns:
A load status object, which can be used to make assertions about the
status of a checkpoint restoration. See tf.train.Checkpoint.restore()
for more details.
"""
# Ensure that we do not request async checkpointing to the underlying
# checkpointer as this could lead to an infinite loop.
self._checkpoint_options = (
copy.copy(options) if options else self._checkpoint_options)
if self._checkpoint_options:
self._checkpoint_options.experimental_enable_async_checkpoint = False
# Wait for any ongoing checkpoint event to finish.
self._queue.join()
# Restore values of the cpu-copied variables directly back to accelerators
status = self.checkpointer().restore(save_path, self._checkpoint_options)
return status
def sync(self):
"""Sync on any ongoing save or restore events."""
self._queue.join()
logging.info("Sync on ongoing save/restore.")
| AsyncCheckpointHelper |
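A minimal usage sketch, assuming a TF2 program and an existing trackable `model` (its construction is omitted); `tf.train.Checkpoint` is passed as the underlying checkpointer implementation, matching the constructor's `checkpointer_impl` argument.
import tensorflow as tf

helper = AsyncCheckpointHelper(tf.train.Checkpoint, model=model)  # model: placeholder
path = helper.save("/tmp/ckpt")  # returns "/tmp/ckpt-<save_counter>"
helper.sync()                    # block until the async write has finished
helper.restore(path)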
python | tensorflow__tensorflow | tensorflow/python/util/protobuf/compare_test.py | {
"start": 1145,
"end": 10162
} | class ____(googletest.TestCase):
def assertNotEqual(self, a, b):
"""Asserts that ProtoEq says a != b."""
a, b = LargePbs(a, b)
googletest.TestCase.assertEqual(self, compare.ProtoEq(a, b), False)
def assertEqual(self, a, b):
"""Asserts that ProtoEq says a == b."""
a, b = LargePbs(a, b)
googletest.TestCase.assertEqual(self, compare.ProtoEq(a, b), True)
def testPrimitives(self):
googletest.TestCase.assertEqual(self, True, compare.ProtoEq('a', 'a'))
googletest.TestCase.assertEqual(self, False, compare.ProtoEq('b', 'a'))
def testEmpty(self):
self.assertEqual('', '')
def testPrimitiveFields(self):
self.assertNotEqual('string_: "a"', '')
self.assertEqual('string_: "a"', 'string_: "a"')
self.assertNotEqual('string_: "b"', 'string_: "a"')
self.assertNotEqual('string_: "ab"', 'string_: "aa"')
self.assertNotEqual('int64_: 0', '')
self.assertEqual('int64_: 0', 'int64_: 0')
self.assertNotEqual('int64_: -1', '')
self.assertNotEqual('int64_: 1', 'int64_: 0')
self.assertNotEqual('int64_: 0', 'int64_: -1')
self.assertNotEqual('float_: 0.0', '')
self.assertEqual('float_: 0.0', 'float_: 0.0')
self.assertNotEqual('float_: -0.1', '')
self.assertNotEqual('float_: 3.14', 'float_: 0')
self.assertNotEqual('float_: 0', 'float_: -0.1')
self.assertEqual('float_: -0.1', 'float_: -0.1')
self.assertNotEqual('bool_: true', '')
self.assertNotEqual('bool_: false', '')
self.assertNotEqual('bool_: true', 'bool_: false')
self.assertEqual('bool_: false', 'bool_: false')
self.assertEqual('bool_: true', 'bool_: true')
self.assertNotEqual('enum_: A', '')
self.assertNotEqual('enum_: B', 'enum_: A')
self.assertNotEqual('enum_: C', 'enum_: B')
self.assertEqual('enum_: C', 'enum_: C')
def testRepeatedPrimitives(self):
self.assertNotEqual('int64s: 0', '')
self.assertEqual('int64s: 0', 'int64s: 0')
self.assertNotEqual('int64s: 1', 'int64s: 0')
self.assertNotEqual('int64s: 0 int64s: 0', '')
self.assertNotEqual('int64s: 0 int64s: 0', 'int64s: 0')
self.assertNotEqual('int64s: 1 int64s: 0', 'int64s: 0')
self.assertNotEqual('int64s: 0 int64s: 1', 'int64s: 0')
self.assertNotEqual('int64s: 1', 'int64s: 0 int64s: 2')
self.assertNotEqual('int64s: 2 int64s: 0', 'int64s: 1')
self.assertEqual('int64s: 0 int64s: 0', 'int64s: 0 int64s: 0')
self.assertEqual('int64s: 0 int64s: 1', 'int64s: 0 int64s: 1')
self.assertNotEqual('int64s: 1 int64s: 0', 'int64s: 0 int64s: 0')
self.assertNotEqual('int64s: 1 int64s: 0', 'int64s: 0 int64s: 1')
self.assertNotEqual('int64s: 1 int64s: 0', 'int64s: 0 int64s: 2')
self.assertNotEqual('int64s: 1 int64s: 1', 'int64s: 1 int64s: 0')
self.assertNotEqual('int64s: 1 int64s: 1', 'int64s: 1 int64s: 0 int64s: 2')
def testMessage(self):
self.assertNotEqual('small <>', '')
self.assertEqual('small <>', 'small <>')
self.assertNotEqual('small < strings: "a" >', '')
self.assertNotEqual('small < strings: "a" >', 'small <>')
self.assertEqual('small < strings: "a" >', 'small < strings: "a" >')
self.assertNotEqual('small < strings: "b" >', 'small < strings: "a" >')
self.assertNotEqual(
'small < strings: "a" strings: "b" >', 'small < strings: "a" >'
)
self.assertNotEqual('string_: "a"', 'small <>')
self.assertNotEqual('string_: "a"', 'small < strings: "b" >')
self.assertNotEqual('string_: "a"', 'small < strings: "b" strings: "c" >')
self.assertNotEqual('string_: "a" small <>', 'small <>')
self.assertNotEqual('string_: "a" small <>', 'small < strings: "b" >')
self.assertEqual('string_: "a" small <>', 'string_: "a" small <>')
self.assertNotEqual(
'string_: "a" small < strings: "a" >', 'string_: "a" small <>'
)
self.assertEqual('string_: "a" small < strings: "a" >',
'string_: "a" small < strings: "a" >')
self.assertNotEqual(
'string_: "a" small < strings: "a" >',
'int64_: 1 small < strings: "a" >',
)
self.assertNotEqual('string_: "a" small < strings: "a" >', 'int64_: 1')
self.assertNotEqual('string_: "a"', 'int64_: 1 small < strings: "a" >')
self.assertNotEqual(
'string_: "a" int64_: 0 small < strings: "a" >',
'int64_: 1 small < strings: "a" >',
)
self.assertNotEqual(
'string_: "a" int64_: 1 small < strings: "a" >',
'string_: "a" int64_: 0 small < strings: "a" >',
)
self.assertEqual('string_: "a" int64_: 0 small < strings: "a" >',
'string_: "a" int64_: 0 small < strings: "a" >')
def testNestedMessage(self):
self.assertNotEqual('medium <>', '')
self.assertEqual('medium <>', 'medium <>')
self.assertNotEqual('medium < smalls <> >', 'medium <>')
self.assertEqual('medium < smalls <> >', 'medium < smalls <> >')
self.assertNotEqual(
'medium < smalls <> smalls <> >', 'medium < smalls <> >'
)
self.assertEqual('medium < smalls <> smalls <> >',
'medium < smalls <> smalls <> >')
self.assertNotEqual('medium < int32s: 0 >', 'medium < smalls <> >')
self.assertNotEqual(
'medium < smalls < strings: "a"> >', 'medium < smalls <> >'
)
def testTagOrder(self):
"""Tests that different fields are ordered by tag number.
For reference, here are the relevant tag numbers from compare_test.proto:
optional string string_ = 1;
optional int64 int64_ = 2;
optional float float_ = 3;
optional Medium medium = 7;
optional Small small = 8;
"""
self.assertNotEqual(
'string_: "a" ',
' int64_: 1 ',
)
self.assertNotEqual(
'string_: "a" int64_: 2 ',
' int64_: 1 ',
)
self.assertNotEqual(
'string_: "b" int64_: 1 ',
'string_: "a" int64_: 2 ',
)
self.assertEqual('string_: "a" int64_: 1 ',
'string_: "a" int64_: 1 ')
self.assertNotEqual(
'string_: "a" int64_: 1 float_: 0.0',
'string_: "a" int64_: 1 ',
)
self.assertEqual('string_: "a" int64_: 1 float_: 0.0',
'string_: "a" int64_: 1 float_: 0.0')
self.assertNotEqual(
'string_: "a" int64_: 1 float_: 0.1',
'string_: "a" int64_: 1 float_: 0.0',
)
self.assertNotEqual(
'string_: "a" int64_: 2 float_: 0.0',
'string_: "a" int64_: 1 float_: 0.1',
)
self.assertNotEqual(
'string_: "a" ',
' int64_: 1 float_: 0.1',
)
self.assertNotEqual(
'string_: "a" float_: 0.0',
' int64_: 1 ',
)
self.assertNotEqual(
'string_: "b" float_: 0.0',
'string_: "a" int64_: 1 ',
)
self.assertNotEqual('string_: "a"', 'small < strings: "a" >')
self.assertNotEqual(
'string_: "a" small < strings: "a" >', 'small < strings: "b" >'
)
self.assertNotEqual(
'string_: "a" small < strings: "b" >',
'string_: "a" small < strings: "a" >',
)
self.assertEqual('string_: "a" small < strings: "a" >',
'string_: "a" small < strings: "a" >')
self.assertNotEqual(
'string_: "a" medium <>', 'string_: "a" small < strings: "a" >'
)
self.assertNotEqual(
'string_: "a" medium < smalls <> >',
'string_: "a" small < strings: "a" >',
)
self.assertNotEqual('medium <>', 'small < strings: "a" >')
self.assertNotEqual('medium <> small <>', 'small < strings: "a" >')
self.assertNotEqual('medium < smalls <> >', 'small < strings: "a" >')
self.assertNotEqual(
'medium < smalls < strings: "a" > >', 'small < strings: "b" >'
)
def testIsClose(self):
self.assertTrue(compare.isClose(1, 1, 1e-10))
self.assertTrue(compare.isClose(65061.0420, 65061.0322, 1e-5))
self.assertFalse(compare.isClose(65061.0420, 65061.0322, 1e-7))
def testIsCloseNan(self):
self.assertTrue(compare.isClose(float('nan'), float('nan'), 1e-10))
self.assertFalse(compare.isClose(float('nan'), 1, 1e-10))
self.assertFalse(compare.isClose(1, float('nan'), 1e-10))
self.assertFalse(compare.isClose(float('nan'), float('inf'), 1e-10))
def testIsCloseInf(self):
self.assertTrue(compare.isClose(float('inf'), float('inf'), 1e-10))
self.assertTrue(compare.isClose(float('-inf'), float('-inf'), 1e-10))
self.assertFalse(compare.isClose(float('-inf'), float('inf'), 1e-10))
self.assertFalse(compare.isClose(float('inf'), 1, 1e-10))
self.assertFalse(compare.isClose(1, float('inf'), 1e-10))
def testIsCloseSubnormal(self):
x = sys.float_info.min * sys.float_info.epsilon
self.assertTrue(compare.isClose(x, x, 1e-10))
self.assertFalse(compare.isClose(x, 1, 1e-10))
| ProtoEqTest |
python | protocolbuffers__protobuf | python/google/protobuf/internal/descriptor_pool_test.py | {
"start": 44510,
"end": 45559
} | class ____(object):
def __init__(self, number, type_name):
self.number = number
self.type_name = type_name
def CheckField(self, test, msg_desc, name, index, file_desc):
field_desc = msg_desc.fields_by_name[name]
field_type_desc = msg_desc.nested_types_by_name[self.type_name]
test.assertEqual(name, field_desc.name)
expected_field_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_field_full_name, field_desc.full_name)
test.assertEqual(index, field_desc.index)
test.assertEqual(self.number, field_desc.number)
test.assertEqual(descriptor.FieldDescriptor.TYPE_MESSAGE, field_desc.type)
test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_MESSAGE,
field_desc.cpp_type)
test.assertFalse(field_desc.has_default_value)
test.assertEqual(msg_desc, field_desc.containing_type)
test.assertEqual(field_type_desc, field_desc.message_type)
test.assertEqual(file_desc, field_desc.file)
test.assertEqual(field_desc.default_value, None)
| MessageField |
python | kamyu104__LeetCode-Solutions | Python/where-will-the-ball-fall.py | {
"start": 33,
"end": 512
} | class ____(object):
def findBall(self, grid):
"""
:type grid: List[List[int]]
:rtype: List[int]
"""
result = []
for c in xrange(len(grid[0])):
for r in xrange(len(grid)):
nc = c+grid[r][c]
if not (0 <= nc < len(grid[0]) and grid[r][nc] == grid[r][c]):
c = -1
break
c = nc
result.append(c)
return result
| Solution |
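This solution targets Python 2 (`xrange`); under Python 3 substitute `range`, after which the classic example behaves as:
assert Solution().findBall(
    [[1, 1, 1, -1, -1],
     [1, 1, 1, -1, -1],
     [-1, -1, -1, 1, 1],
     [1, 1, 1, 1, -1],
     [-1, -1, -1, -1, -1]]) == [1, -1, -1, -1, -1]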
python | huggingface__transformers | tests/models/maskformer/test_modeling_maskformer_swin.py | {
"start": 1332,
"end": 6129
} | class ____:
def __init__(
self,
parent,
batch_size=13,
image_size=32,
patch_size=2,
num_channels=3,
embed_dim=16,
depths=[1, 2, 1],
num_heads=[2, 2, 4],
window_size=2,
mlp_ratio=2.0,
qkv_bias=True,
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
drop_path_rate=0.1,
hidden_act="gelu",
use_absolute_embeddings=False,
patch_norm=True,
initializer_range=0.02,
layer_norm_eps=1e-5,
is_training=True,
scope=None,
use_labels=True,
type_sequence_label_size=10,
encoder_stride=8,
out_features=["stage1", "stage2", "stage3"],
out_indices=[1, 2, 3],
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.patch_norm = patch_norm
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.is_training = is_training
self.scope = scope
self.use_labels = use_labels
self.type_sequence_label_size = type_sequence_label_size
self.encoder_stride = encoder_stride
self.out_features = out_features
self.out_indices = out_indices
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return MaskFormerSwinConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
embed_dim=self.embed_dim,
depths=self.depths,
num_heads=self.num_heads,
window_size=self.window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=self.qkv_bias,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
drop_path_rate=self.drop_path_rate,
hidden_act=self.hidden_act,
use_absolute_embeddings=self.use_absolute_embeddings,
patch_norm=self.patch_norm,
layer_norm_eps=self.layer_norm_eps,
initializer_range=self.initializer_range,
encoder_stride=self.encoder_stride,
out_features=self.out_features,
out_indices=self.out_indices,
)
def create_and_check_model(self, config, pixel_values, labels):
model = MaskFormerSwinModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
def create_and_check_backbone(self, config, pixel_values, labels):
model = MaskFormerSwinBackbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
# verify channels
self.parent.assertEqual(len(model.channels), len(config.out_features))
self.parent.assertListEqual(model.channels, [16, 32, 64])
# verify ValueError
with self.parent.assertRaises(ValueError):
config.out_features = ["stem"]
model = MaskFormerSwinBackbone(config=config)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| MaskFormerSwinModelTester |
python | run-llama__llama_index | llama-index-core/llama_index/core/chat_ui/models/artifact.py | {
"start": 106,
"end": 183
} | class ____(str, Enum):
CODE = "code"
DOCUMENT = "document"
| ArtifactType |
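Because the enum subclasses `str`, its members compare equal to their raw string values:
assert ArtifactType.CODE == "code"
assert ArtifactType("document") is ArtifactType.DOCUMENT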
python | getsentry__sentry | src/sentry/taskworker/app.py | {
"start": 315,
"end": 2764
} | class ____:
"""
Container for an application's task setup and configuration.
"""
def __init__(self, taskregistry: TaskRegistry | None = None) -> None:
self._config = {
"rpc_secret": None,
"at_most_once_timeout": None,
}
self._modules: Iterable[str] = []
self._taskregistry = taskregistry or TaskRegistry()
self._at_most_once_store: AtMostOnceStore | None = None
@property
def taskregistry(self) -> TaskRegistry:
"""Get the TaskRegistry instance from this app"""
return self._taskregistry
@property
def config(self) -> dict[str, Any]:
"""Get the config data"""
return self._config
def set_config(self, config: dict[str, Any]) -> None:
"""Update configuration data"""
for key, value in config.items():
if key in self._config:
self._config[key] = value
def set_modules(self, modules: Iterable[str]) -> None:
"""
Set the list of modules containing tasks to be loaded by workers and schedulers.
"""
self._modules = modules
def load_modules(self) -> None:
"""Load all of the configured modules"""
for mod in self._modules:
__import__(mod)
def at_most_once_store(self, backend: AtMostOnceStore) -> None:
"""
Set the backend store for `at_most_once` tasks.
The storage implementation should support atomic operations
to avoid races with at_most_once tasks.
"""
self._at_most_once_store = backend
def should_attempt_at_most_once(self, activation: TaskActivation) -> bool:
if not self._at_most_once_store:
return True
key = get_at_most_once_key(activation.namespace, activation.taskname, activation.id)
return self._at_most_once_store.add(
key, "1", timeout=self._config["at_most_once_timeout"] or 60
)
def get_at_most_once_key(namespace: str, taskname: str, task_id: str) -> str:
# tw:amo -> taskworker:at_most_once
return f"tw:amo:{namespace}:{taskname}:{task_id}"
def import_app(app_module: str) -> TaskworkerApp:
"""
Resolve an application path like `acme.worker.runtime:app`
into the `app` symbol defined in the module.
"""
module_name, name = app_module.split(":")
module = importlib.import_module(module_name)
return getattr(module, name)
| TaskworkerApp |
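A hedged wiring sketch; the module paths are placeholders, so `load_modules` and `import_app` are left commented out because the example modules do not exist.
app = TaskworkerApp()
app.set_config({"at_most_once_timeout": 120})  # keys outside _config are ignored
app.set_modules(["acme.worker.tasks"])         # placeholder module path
# app.load_modules()                           # imports each configured module
# app = import_app("acme.worker.runtime:app")  # resolves "<module>:<symbol>"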
python | ray-project__ray | python/ray/serve/tests/unit/test_application_state.py | {
"start": 1874,
"end": 9106
} | class ____:
def __init__(self, kv_store):
self.kv_store = kv_store
self.deployment_infos: Dict[DeploymentID, DeploymentInfo] = dict()
self.deployment_statuses: Dict[DeploymentID, DeploymentStatusInfo] = dict()
self.deleting: Dict[DeploymentID, bool] = dict()
# Recover
recovered_deployments = self.kv_store.get("fake_deployment_state_checkpoint")
if recovered_deployments is not None:
for name, checkpointed_data in recovered_deployments.items():
(info, deleting) = checkpointed_data
self.deployment_infos[name] = info
self.deployment_statuses[name] = DeploymentStatusInfo(
name=name,
status=DeploymentStatus.UPDATING,
status_trigger=DeploymentStatusTrigger.CONFIG_UPDATE_STARTED,
message="",
)
self.deleting[name] = deleting
self._scaling_decisions = {}
def deploy(
self,
deployment_id: DeploymentID,
deployment_info: DeploymentInfo,
):
existing_info = self.deployment_infos.get(deployment_id)
self.deleting[deployment_id] = False
self.deployment_infos[deployment_id] = deployment_info
if not existing_info or existing_info.version != deployment_info.version:
self.deployment_statuses[deployment_id] = DeploymentStatusInfo(
name=deployment_id.name,
status=DeploymentStatus.UPDATING,
status_trigger=DeploymentStatusTrigger.CONFIG_UPDATE_STARTED,
message="",
)
self.kv_store.put(
"fake_deployment_state_checkpoint",
dict(
zip(
self.deployment_infos.keys(),
zip(self.deployment_infos.values(), self.deleting.values()),
)
),
)
@property
def deployments(self) -> List[str]:
return list(self.deployment_infos.keys())
def get_deployment_statuses(self, ids: List[DeploymentID]):
return [self.deployment_statuses[id] for id in ids]
def get_deployment(self, deployment_id: DeploymentID) -> DeploymentInfo:
if deployment_id in self.deployment_statuses:
# Return dummy deployment info object
return DeploymentInfo(
deployment_config=DeploymentConfig(
num_replicas=self.deployment_infos[
deployment_id
].deployment_config.num_replicas,
user_config={},
),
replica_config=ReplicaConfig.create(lambda x: x),
start_time_ms=0,
deployer_job_id="",
)
def get_deployments_in_application(self, app_name: str):
deployments = []
for deployment_id in self.deployment_infos:
if deployment_id.app_name == app_name:
deployments.append(deployment_id.name)
return deployments
def set_deployment_unhealthy(self, id: DeploymentID):
self.deployment_statuses[id].status = DeploymentStatus.UNHEALTHY
def set_deployment_deploy_failed(self, id: DeploymentID):
self.deployment_statuses[id].status = DeploymentStatus.DEPLOY_FAILED
def set_deployment_healthy(self, id: DeploymentID):
self.deployment_statuses[id].status = DeploymentStatus.HEALTHY
def set_deployment_updating(self, id: DeploymentID):
self.deployment_statuses[id].status = DeploymentStatus.UPDATING
def set_deployment_deleted(self, id: str):
if not self.deployment_infos[id]:
raise ValueError(
f"Tried to mark deployment {id} as deleted, but {id} not found"
)
if not self.deleting[id]:
raise ValueError(
f"Tried to mark deployment {id} as deleted, but delete_deployment()"
f"hasn't been called for {id} yet"
)
del self.deployment_infos[id]
del self.deployment_statuses[id]
del self.deleting[id]
def delete_deployment(self, id: DeploymentID):
self.deleting[id] = True
def get_deployment_target_num_replicas(self, id: DeploymentID) -> Optional[int]:
return self.deployment_infos[id].deployment_config.num_replicas
def save_checkpoint(self):
"""Mock save checkpoint method."""
pass
def autoscale(self, id: DeploymentID, target_num_replicas: int):
self._scaling_decisions[id] = target_num_replicas
return True
def get_deployment_route_patterns(self, id: DeploymentID) -> Optional[List[str]]:
return None
def get_deployment_outbound_deployments(
self, id: DeploymentID
) -> Optional[List[DeploymentID]]:
"""Mock method to return outbound deployments for a deployment."""
# Return None by default, tests can override this
return getattr(self, f"_outbound_deps_{id.name}_{id.app_name}", None)
@pytest.fixture
def mocked_application_state_manager() -> (
    Tuple[ApplicationStateManager, MockDeploymentStateManager, MockKVStore]
):
kv_store = MockKVStore()
deployment_state_manager = MockDeploymentStateManager(kv_store)
application_state_manager = ApplicationStateManager(
deployment_state_manager,
AutoscalingStateManager(),
MockEndpointState(),
kv_store,
LoggingConfig(),
)
yield application_state_manager, deployment_state_manager, kv_store
def deployment_params(
name: str,
    route_prefix: Optional[str] = None,
    autoscaling_config: Optional[AutoscalingConfig] = None,
num_replicas: int = 1,
):
return {
"deployment_name": name,
"deployment_config_proto_bytes": DeploymentConfig(
num_replicas=num_replicas,
user_config={},
version=get_random_string(),
autoscaling_config=autoscaling_config,
).to_proto_bytes(),
"replica_config_proto_bytes": ReplicaConfig.create(
lambda x: x
).to_proto_bytes(),
"deployer_job_id": "random",
"route_prefix": route_prefix,
"ingress": route_prefix is not None,
"serialized_autoscaling_policy_def": None,
"serialized_request_router_cls": None,
}
def deployment_info(
name: str,
    route_prefix: Optional[str] = None,
    autoscaling_config: Optional[AutoscalingConfig] = None,
num_replicas: int = 1,
):
params = deployment_params(name, route_prefix, autoscaling_config, num_replicas)
return deploy_args_to_deployment_info(**params, app_name="test_app")
@pytest.fixture
def mocked_application_state() -> Tuple[ApplicationState, MockDeploymentStateManager]:
kv_store = MockKVStore()
deployment_state_manager = MockDeploymentStateManager(kv_store)
application_state = ApplicationState(
name="test_app",
deployment_state_manager=deployment_state_manager,
autoscaling_state_manager=AutoscalingStateManager(),
endpoint_state=MockEndpointState(),
logging_config=LoggingConfig(),
external_scaler_enabled=False,
)
yield application_state, deployment_state_manager
| MockDeploymentStateManager |
python | PrefectHQ__prefect | tests/test_task_engine.py | {
"start": 63173,
"end": 70073
} | class ____:
async def test_result_stored_with_storage_key_if_no_policy_set(
self, prefect_client
):
# avoid conflicts
key = f"foo-bar-{random.randint(0, 10000)}"
@task(persist_result=True, result_storage_key=key)
async def async_task():
return 1800
state = await async_task(return_state=True)
assert state.is_completed()
assert await state.result() == 1800
assert Path(state.data.metadata.storage_key).name == key
async def test_cache_expiration_is_respected(self, advance_time, tmp_path):
fs = LocalFileSystem(basepath=tmp_path)
await fs.save("local-fs")
@task(
persist_result=True,
result_storage_key="expiring-foo-bar",
cache_expiration=timedelta(seconds=1.0),
result_storage=fs,
)
async def async_task():
return random.randint(0, 10000)
first_state = await async_task(return_state=True)
assert first_state.is_completed()
first_result = await first_state.result()
second_state = await async_task(return_state=True)
assert second_state.is_completed()
second_result = await second_state.result()
assert first_result == second_result, "Cache was not used"
# let cache expire...
advance_time(timedelta(seconds=1.1))
third_state = await async_task(return_state=True)
assert third_state.is_completed()
third_result = await third_state.result()
# cache expired, new result
assert third_result not in [first_result, second_result], "Cache did not expire"
async def test_cache_expiration_expires(self, prefect_client, tmp_path):
fs = LocalFileSystem(basepath=tmp_path)
await fs.save("test-once")
@task(
persist_result=True,
result_storage_key="expiring-foo-bar",
cache_expiration=timedelta(seconds=0.0),
result_storage=fs,
)
async def async_task():
return random.randint(0, 10000)
first_state = await async_task(return_state=True)
assert first_state.is_completed()
await asyncio.sleep(0.1)
second_state = await async_task(return_state=True)
assert second_state.is_completed()
assert await first_state.result() != await second_state.result(), (
"Cache did not expire"
)
async def test_none_policy_with_persist_result_false(self, prefect_client):
@task(cache_policy=None, result_storage_key=None, persist_result=False)
async def async_task():
return 1800
assert async_task.cache_policy is None
state = await async_task(return_state=True)
assert state.is_completed()
assert await state.result() == 1800
assert isinstance(state.data, ResultRecord)
assert not Path(state.data.metadata.storage_key).exists()
async def test_none_return_value_does_persist(self, prefect_client, tmp_path):
fs = LocalFileSystem(basepath=tmp_path)
await fs.save("none-test")
FIRST_RUN = True
@task(
persist_result=True,
cache_key_fn=lambda *args, **kwargs: "test-none-caches",
result_storage=fs,
)
async def async_task():
nonlocal FIRST_RUN
if FIRST_RUN:
FIRST_RUN = False
return None
else:
return 42
first_val = await async_task()
        # sanity check: the first call actually ran the task body
assert FIRST_RUN is False
second_val = await async_task()
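        # The second call hits the cache and returns the persisted None instead of 42.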
assert first_val is None
assert second_val is None
async def test_error_handling_on_cache_policies(self, prefect_client, tmp_path):
fs = LocalFileSystem(basepath=tmp_path)
await fs.save("error-handling-test")
@task(
cache_policy=TASK_SOURCE + INPUTS,
result_storage=fs,
)
def my_random_task(x: int, cmplx_input):
return random.randint(0, x)
@flow
def my_param_flow(x: int):
import threading
thread = threading.Thread()
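            # Thread objects can't be serialized, so INPUTS cache-key computation fails and caching is skipped.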
first_val = my_random_task(x, cmplx_input=thread, return_state=True)
second_val = my_random_task(x, cmplx_input=thread, return_state=True)
return first_val, second_val
first, second = my_param_flow(4200)
assert first.name == "Completed"
assert second.name == "Completed"
first_result = await first.result()
second_result = await second.result()
assert first_result != second_result
async def test_flow_parameter_caching(self, prefect_client, tmp_path):
fs = LocalFileSystem(basepath=tmp_path)
await fs.save("param-test")
@task(
cache_policy=FLOW_PARAMETERS,
result_storage=fs,
persist_result=True,
)
def my_random_task(x: int):
return random.randint(0, x)
@flow
def my_param_flow(x: int, other_val: str):
first_val = my_random_task(x, return_state=True)
second_val = my_random_task(x, return_state=True)
return first_val, second_val
first, second = my_param_flow(4200, other_val="foo")
assert first.name == "Completed"
assert second.name == "Cached"
first_result = await first.result()
second_result = await second.result()
assert first_result == second_result
third, fourth = my_param_flow(4200, other_val="bar")
assert third.name == "Completed"
assert fourth.name == "Cached"
third_result = await third.result()
fourth_result = await fourth.result()
assert third_result not in [first_result, second_result]
assert fourth_result not in [first_result, second_result]
async def test_bad_api_result_references_cause_reruns(self, tmp_path: Path):
fs = LocalFileSystem(basepath=tmp_path)
await fs.save("badapi")
PAYLOAD = {"return": 42}
@task(result_storage=fs, result_storage_key="tmp-first", persist_result=True)
async def first():
return PAYLOAD["return"], get_run_context().task_run
result, task_run = await run_task_async(first)
assert result == 42
assert await fs.read_path("tmp-first")
        # delete the persisted record to simulate a dangling result reference in the API
path = fs._resolve_path("tmp-first")
os.unlink(path)
with pytest.raises(ValueError, match="does not exist"):
assert await fs.read_path("tmp-first")
# rerun with same task run ID
PAYLOAD["return"] = "bar"
result, task_run = await run_task_async(first, task_run=task_run)
assert result == "bar"
assert await fs.read_path("tmp-first")
| TestCachePolicy |
python | aio-libs__aiohttp | aiohttp/http_exceptions.py | {
"start": 1366,
"end": 1452
} | class ____(BadHttpMessage):
"""Base class for payload errors"""
| PayloadEncodingError |
python | tensorflow__tensorflow | tensorflow/python/distribute/distribute_lib.py | {
"start": 167631,
"end": 168008
} | class ____(ReplicaContextBase):
__doc__ = ReplicaContextBase.__doc__
def _batch_reduce_destination(x):
"""Returns the destinations for batch all-reduce."""
if isinstance(x, tensor_lib.Tensor):
# If this is a one device strategy.
return x.device
else:
return x
# ------------------------------------------------------------------------------
| ReplicaContextV1 |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vectorizers.py | {
"start": 14519,
"end": 14825
} | class ____(_VectorizerConfigCreate):
vectorizer: Union[Vectorizers, _EnumLikeStr] = Field(
default=Vectorizers.TEXT2VEC_WEAVIATE, frozen=True, exclude=True
)
model: Optional[str]
baseURL: Optional[str]
vectorizeClassName: bool
dimensions: Optional[int]
| _Text2VecWeaviateConfig |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/executors/batch/utils.py | {
"start": 4785,
"end": 5145
} | class ____(BatchSubmitJobKwargsConfigKeys):
"""All keys loaded into the config which are related to the Batch Executor."""
MAX_SUBMIT_JOB_ATTEMPTS = "max_submit_job_attempts"
AWS_CONN_ID = "conn_id"
SUBMIT_JOB_KWARGS = "submit_job_kwargs"
REGION_NAME = "region_name"
CHECK_HEALTH_ON_STARTUP = "check_health_on_startup"
| AllBatchConfigKeys |
python | cherrypy__cherrypy | cherrypy/test/test_wsgi_vhost.py | {
"start": 51,
"end": 1061
} | class ____(helper.CPWebCase):
@staticmethod
def setup_server():
class ClassOfRoot(object):
def __init__(self, name):
self.name = name
@cherrypy.expose
def index(self):
return 'Welcome to the %s website!' % self.name
default = cherrypy.Application(None)
domains = {}
for year in range(1997, 2008):
app = cherrypy.Application(ClassOfRoot('Class of %s' % year))
domains['www.classof%s.example' % year] = app
cherrypy.tree.graft(cherrypy._cpwsgi.VirtualHost(default, domains))
def test_welcome(self):
if not cherrypy.server.using_wsgi:
return self.skip('skipped (not using WSGI)... ')
for year in range(1997, 2008):
self.getPage(
'/',
headers=[('Host', 'www.classof%s.example' % year)],
)
self.assertBody('Welcome to the Class of %s website!' % year)
| WSGI_VirtualHost_Test |
python | PrefectHQ__prefect | src/prefect/utilities/visualization.py | {
"start": 2907,
"end": 7421
} | class ____:
def __init__(self):
self.tasks: list[VizTask] = []
self.dynamic_task_counter: dict[str, int] = {}
self.object_id_to_task: dict[int, VizTask] = {}
def add_task(self, task: VizTask) -> None:
if task.name not in self.dynamic_task_counter:
self.dynamic_task_counter[task.name] = 0
else:
self.dynamic_task_counter[task.name] += 1
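        # Suffix each task name with its call count so repeated calls get unique node names.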
task.name = f"{task.name}-{self.dynamic_task_counter[task.name]}"
self.tasks.append(task)
def __enter__(self) -> Self:
TaskVizTrackerState.current = self
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
TaskVizTrackerState.current = None
def link_viz_return_value_to_viz_task(
self, viz_return_value: Any, viz_task: VizTask
) -> None:
"""
We cannot track booleans, Ellipsis, None, NotImplemented, or the integers from -5 to 256
because they are singletons.
"""
from prefect.utilities.engine import UNTRACKABLE_TYPES
if (type(viz_return_value) in UNTRACKABLE_TYPES) or (
isinstance(viz_return_value, int) and (-5 <= viz_return_value <= 256)
):
return
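        # Remember which task produced this object so later calls can link it as an upstream dependency.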
self.object_id_to_task[id(viz_return_value)] = viz_task
def build_task_dependencies(task_run_tracker: TaskVizTracker) -> graphviz.Digraph:
"""
Constructs a Graphviz directed graph object that represents the dependencies
between tasks in the given TaskVizTracker.
Parameters:
- task_run_tracker (TaskVizTracker): An object containing tasks and their
dependencies.
Returns:
- graphviz.Digraph: A directed graph object depicting the relationships and
dependencies between tasks.
Raises:
- GraphvizImportError: If there's an ImportError related to graphviz.
- FlowVisualizationError: If there's any other error during the visualization
process or if return values of tasks are directly accessed without
specifying a `viz_return_value`.
"""
try:
g = graphviz.Digraph()
for task in task_run_tracker.tasks:
g.node(task.name) # type: ignore[reportUnknownMemberType]
for upstream in task.upstream_tasks:
g.edge(upstream.name, task.name) # type: ignore[reportUnknownMemberType]
return g
except ImportError as exc:
raise GraphvizImportError from exc
except Exception:
raise FlowVisualizationError(
"Something went wrong building the flow's visualization."
" If you're interacting with the return value of a task"
" directly inside of your flow, you must set a set a `viz_return_value`"
", for example `@task(viz_return_value=[1, 2, 3])`."
)
def visualize_task_dependencies(graph: graphviz.Digraph, flow_run_name: str) -> None:
"""
Renders and displays a Graphviz directed graph representing task dependencies.
The graph is rendered in PNG format and saved with the name specified by
flow_run_name. After rendering, the visualization is opened and displayed.
Parameters:
- graph (graphviz.Digraph): The directed graph object to visualize.
- flow_run_name (str): The name to use when saving the rendered graph image.
Raises:
- GraphvizExecutableNotFoundError: If Graphviz isn't found on the system.
- FlowVisualizationError: If there's any other error during the visualization
process or if return values of tasks are directly accessed without
specifying a `viz_return_value`.
"""
try:
graph.render(filename=flow_run_name, view=True, format="png", cleanup=True) # type: ignore[reportUnknownMemberType]
except graphviz.backend.ExecutableNotFound as exc:
msg = (
"It appears you do not have Graphviz installed, or it is not on your "
"PATH. Please install Graphviz from http://www.graphviz.org/download/. "
"Note: Just installing the `graphviz` python package is not "
"sufficient."
)
raise GraphvizExecutableNotFoundError(msg) from exc
except Exception:
raise FlowVisualizationError(
"Something went wrong building the flow's visualization."
" If you're interacting with the return value of a task"
" directly inside of your flow, you must set a set a `viz_return_value`"
", for example `@task(viz_return_value=[1, 2, 3])`."
)
| TaskVizTracker |