language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | kamyu104__LeetCode-Solutions | Python/maximal-square.py | {
"start": 1093,
"end": 2114
} | class ____(object):
# @param {character[][]} matrix
# @return {integer}
def maximalSquare(self, matrix):
if not matrix:
return 0
m, n = len(matrix), len(matrix[0])
size = [[0 for j in xrange(n)] for i in xrange(m)]
max_size = 0
for j in xrange(n):
if matrix[0][j] == '1':
size[0][j] = 1
max_size = max(max_size, size[0][j])
for i in xrange(1, m):
if matrix[i][0] == '1':
size[i][0] = 1
else:
size[i][0] = 0
for j in xrange(1, n):
if matrix[i][j] == '1':
size[i][j] = min(size[i][j - 1], \
size[i - 1][j], \
size[i - 1][j - 1]) + 1
max_size = max(max_size, size[i][j])
else:
size[i][j] = 0
return max_size * max_size
# Time: O(n^2)
# Space: O(n^2)
# DP.
| Solution2 |
python | getsentry__sentry | src/sentry/integrations/utils/metrics.py | {
"start": 479,
"end": 1576
} | class ____(ABC):
"""Information about an event to be measured.
This is a generic base class not tied specifically to integrations. See
IntegrationEventLifecycleMetric for integration-specific key structure. (This
class could be moved from this module to a more generic package if we ever want
to use it outside of integrations.)
"""
@abstractmethod
def get_metric_key(self, outcome: EventLifecycleOutcome) -> str:
"""Get the metrics key that will identify this event."""
raise NotImplementedError
@abstractmethod
def get_metric_tags(self) -> Mapping[str, str]:
"""Get the metrics tags that will identify this event along with the key."""
raise NotImplementedError
def get_extras(self) -> Mapping[str, Any]:
"""Get extra data to log."""
return {}
def capture(
self, assume_success: bool = True, sample_log_rate: float = 1.0
) -> "EventLifecycle":
"""Open a context to measure the event."""
return EventLifecycle(self, assume_success, sample_log_rate)
| EventLifecycleMetric |
python | ray-project__ray | ci/ray_ci/automation/docker_tags_lib.py | {
"start": 7183,
"end": 21663
} | class ____(Exception):
"""
Exception for failing to retrieve auth token.
"""
def __init__(self, message: str):
super().__init__(f"Failed to retrieve auth token from {message}.")
def _get_docker_auth_token(namespace: str, repository: str) -> Optional[str]:
service, scope = (
"registry.docker.io",
f"repository:{namespace}/{repository}:pull",
)
auth_url = f"https://auth.docker.io/token?service={service}&scope={scope}"
response = requests.get(auth_url)
if response.status_code != 200:
raise AuthTokenException(f"Docker. Error code: {response.status_code}")
token = response.json().get("token", None)
return token
def _get_docker_hub_auth_token(username: str, password: str) -> Optional[str]:
url = "https://hub.docker.com/v2/users/login"
json_body = {
"username": username,
"password": password,
}
headers = {"Content-Type": "application/json"}
response = requests.post(url, headers=headers, json=json_body)
if response.status_code != 200:
raise AuthTokenException(f"Docker Hub. Error code: {response.status_code}")
return response.json().get("token", None)
def _get_git_log(n_days: int = 30) -> str:
return subprocess.check_output(
[
"git",
"log",
f"--until='{n_days} days ago'",
"--pretty=format:%H",
],
text=True,
)
def _list_recent_commit_short_shas(n_days: int = 30) -> List[str]:
"""
Get list of recent commit SHAs (short version) on ray master branch.
Args:
n_days: Number of days to go back in git log.
Returns:
List of recent commit SHAs (short version).
"""
commit_shas = _get_git_log(n_days=n_days)
short_commit_shas = [
commit_sha[:SHA_LENGTH] for commit_sha in commit_shas.split("\n") if commit_sha
]
return short_commit_shas
def _get_config_docker_oci(tag: str, namespace: str, repository: str):
"""Get Docker image config from tag using OCI API."""
token = _get_docker_auth_token(namespace, repository)
# Pull image manifest to get config digest
headers = {
"Authorization": f"Bearer {token}",
"Accept": "application/vnd.docker.distribution.manifest.v2+json",
}
image_manifest_url = (
f"https://registry-1.docker.io/v2/{namespace}/{repository}/manifests/{tag}"
)
response = requests.get(image_manifest_url, headers=headers)
if response.status_code != 200:
raise RetrieveImageConfigException("image manifest.")
config_blob_digest = response.json()["config"]["digest"]
# Pull image config
config_blob_url = f"https://registry-1.docker.io/v2/{namespace}/{repository}/blobs/{config_blob_digest}" # noqa E501
config_headers = {
"Authorization": f"Bearer {token}",
"Accept": "application/vnd.docker.container.image.v1+json",
}
response = requests.get(config_blob_url, headers=config_headers)
if response.status_code != 200:
raise RetrieveImageConfigException("image config.")
return response.json()
def _get_image_creation_time(tag: str) -> datetime:
namespace, repo_tag = tag.split("/")
repository, tag = repo_tag.split(":")
config = _get_config_docker_oci(tag=tag, namespace=namespace, repository=repository)
if "created" not in config:
raise RetrieveImageConfigException("image creation time.")
return parser.isoparse(config["created"])
def delete_tag(tag: str, docker_hub_token: str) -> bool:
"""Delete tag from Docker Hub repo."""
headers = {
"Authorization": f"Bearer {docker_hub_token}",
}
namespace, repo_tag = tag.split("/")
repository, tag_name = repo_tag.split(":")
url = f"https://hub.docker.com/v2/repositories/{namespace}/{repository}/tags/{tag_name}" # noqa E501
response = requests.delete(url, headers=headers)
if response.status_code == 429:
raise DockerHubRateLimitException()
if response.status_code != 204:
logger.info(f"Failed to delete {tag}, status code: {response.json()}")
return False
logger.info(f"Deleted tag {tag}")
return True
def query_tags_from_docker_hub(
filter_func: Callable[[str], bool],
namespace: str,
repository: str,
docker_hub_token: str,
num_tags: Optional[int] = None,
) -> List[str]:
"""
Query tags from Docker Hub repository with filter.
If Docker Hub API returns an error, the function will:
- Stop querying
- Return the current list of tags.
Args:
filter_func: Function to return whether tag should be included.
namespace: Docker namespace
repository: Docker repository
num_tags: Max number of tags to query
Returns:
Sorted list of tags from Docker Hub repository
with format namespace/repository:tag.
"""
filtered_tags = []
headers = {
"Authorization": f"Bearer {docker_hub_token}",
}
page_count = 1
while page_count:
logger.info(f"Querying page {page_count}")
url = f"https://hub.docker.com/v2/namespaces/{namespace}/repositories/{repository}/tags?page={page_count}&page_size=100" # noqa E501
response = requests.get(url, headers=headers)
response_json = response.json()
# Stop querying if Docker Hub API returns an error
if response.status_code != 200:
logger.info(f"Failed to query tags from Docker Hub: Error: {response_json}")
return sorted([f"{namespace}/{repository}:{t}" for t in filtered_tags])
result = response_json["results"]
tags = [tag["name"] for tag in result]
filtered_tags_page = list(filter(filter_func, tags)) # Filter tags
# Add enough tags to not exceed num_tags if num_tags is specified
if num_tags:
if len(filtered_tags) + len(filtered_tags_page) > num_tags:
filtered_tags.extend(
filtered_tags_page[: num_tags - len(filtered_tags)]
)
break
filtered_tags.extend(filtered_tags_page)
logger.info(f"Tag count: {len(filtered_tags)}")
if not response_json["next"]:
break
page_count += 1
return sorted([f"{namespace}/{repository}:{t}" for t in filtered_tags])
def query_tags_from_docker_with_oci(namespace: str, repository: str) -> List[str]:
"""
Query all repo tags from Docker using OCI API.
Args:
namespace: Docker namespace
repository: Docker repository
Returns:
List of tags from Docker Registry in format namespace/repository:tag.
"""
token = _get_docker_auth_token(namespace, repository)
headers = {
"Authorization": f"Bearer {token}",
}
url = f"https://registry-1.docker.io/v2/{namespace}/{repository}/tags/list"
response = requests.get(url, headers=headers)
if response.status_code != 200:
raise Exception(f"Failed to query tags from Docker: {response.json()}")
return [f"{namespace}/{repository}:{t}" for t in response.json()["tags"]]
def _is_release_tag(
tag: str,
release_versions: Optional[List[str]] = None,
) -> bool:
"""
Check if tag is a release tag & is in the list of release versions.
Tag input format should be just the tag name, without namespace/repository.
Tag input can be in any format queried from Docker Hub: "x.y.z-...", "a1s2d3-..."
Args:
tag: Docker tag name
release_versions: List of release versions.
If None, don't filter by release version.
Returns:
True if tag is a release tag and is in the list of release versions.
False otherwise.
"""
versions = tag.split(".")
if len(versions) != 3 and "post1" not in tag:
return False
# Parse variables into major, minor, patch version
major, minor, patch = versions[0], versions[1], versions[2]
extra = versions[3] if len(versions) > 3 else None
if not major.isnumeric() or not minor.isnumeric():
return False
if not patch.isnumeric() and "rc" not in patch and "-" not in patch:
return False
if "-" in patch:
patch = patch.split("-")[0]
release_version = ".".join([major, minor, patch])
if extra:
release_version += f".{extra}"
if release_versions and release_version not in release_versions:
return False
return True
def _crane_binary():
r = runfiles.Create()
system = platform.system()
if system != "Linux" or platform.processor() != "x86_64":
raise ValueError(f"Unsupported platform: {system}")
return r.Rlocation("crane_linux_x86_64/crane")
def call_crane_copy(source: str, destination: str) -> Tuple[int, str]:
try:
with subprocess.Popen(
[
_crane_binary(),
"copy",
source,
destination,
],
stdout=subprocess.PIPE,
text=True,
) as proc:
output = ""
for line in proc.stdout:
logger.info(line + "\n")
output += line
return_code = proc.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, proc.args)
return return_code, output
except subprocess.CalledProcessError as e:
return e.returncode, e.output
def _call_crane_cp(tag: str, source: str, aws_ecr_repo: str) -> Tuple[int, str]:
try:
with subprocess.Popen(
[
_crane_binary(),
"cp",
source,
f"{aws_ecr_repo}:{tag}",
],
stdout=subprocess.PIPE,
text=True,
) as proc:
output = ""
for line in proc.stdout:
logger.info(line + "\n")
output += line
return_code = proc.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, proc.args)
return return_code, output
except subprocess.CalledProcessError as e:
return e.returncode, e.output
def _call_crane_index(index_name: str, tags: List[str]) -> Tuple[int, str]:
try:
with subprocess.Popen(
[
_crane_binary(),
"index",
"append",
"-m",
tags[0],
"-m",
tags[1],
"-t",
index_name,
],
stdout=subprocess.PIPE,
text=True,
) as proc:
output = ""
for line in proc.stdout:
logger.info(line + "\n")
output += line
return_code = proc.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, proc.args)
return return_code, output
except subprocess.CalledProcessError as e:
return e.returncode, e.output
def _call_crane_manifest(tag: str) -> Tuple[int, str]:
try:
with subprocess.Popen(
[
_crane_binary(),
"manifest",
tag,
],
stdout=subprocess.PIPE,
text=True,
) as proc:
output = ""
for line in proc.stdout:
logger.info(line + "\n")
output += line
return_code = proc.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, proc.args)
return return_code, output
except subprocess.CalledProcessError as e:
return e.returncode, e.output
def copy_tag_to_aws_ecr(tag: str, aws_ecr_repo: str) -> bool:
"""
Copy tag from Docker Hub to AWS ECR.
Args:
tag: Docker tag name in format "namespace/repository:tag"
Returns:
True if tag was copied successfully, False otherwise.
"""
_, repo_tag = tag.split("/")
tag_name = repo_tag.split(":")[1]
logger.info(f"Copying from {tag} to {aws_ecr_repo}:{tag_name}......")
return_code, output = call_crane_copy(
source=tag,
destination=f"{aws_ecr_repo}:{tag_name}",
)
if return_code:
logger.info(f"Failed to copy {tag} to {aws_ecr_repo}:{tag_name}......")
logger.info(f"Error: {output}")
return False
logger.info(f"Copied {tag} to {aws_ecr_repo}:{tag_name} successfully")
return True
def backup_release_tags(
namespace: str,
repository: str,
aws_ecr_repo: str,
docker_username: str,
docker_password: str,
release_versions: Optional[List[str]] = None,
) -> None:
"""
Backup release tags to AWS ECR.
Args:
release_versions: List of release versions to backup
aws_ecr_repo: AWS ECR repository
"""
docker_hub_token = _get_docker_hub_auth_token(docker_username, docker_password)
docker_hub_tags = query_tags_from_docker_hub(
filter_func=lambda t: _is_release_tag(t, release_versions),
namespace=namespace,
repository=repository,
docker_hub_token=docker_hub_token,
)
_write_to_file("release_tags.txt", docker_hub_tags)
for t in docker_hub_tags:
copy_tag_to_aws_ecr(tag=t, aws_ecr_repo=aws_ecr_repo)
def _write_to_file(file_path: str, content: List[str]) -> None:
file_path = os.path.join(bazel_workspace_dir, file_path)
logger.info(f"Writing to {file_path}......")
with open(file_path, "w") as f:
f.write("\n".join(content))
def generate_index(index_name: str, tags: List[str]) -> bool:
print(f"Generating index {index_name} with tags {tags}")
# Make sure tag is an image and not an index
for tag in tags:
return_code, output = _call_crane_manifest(tag)
if return_code:
logger.info(f"Failed to get manifest for {tag}")
logger.info(f"Error: {output}")
return False
if "application/vnd.docker.distribution.manifest.list.v2+json" in output:
logger.info(f"Tag {tag} is an index, not an image")
return False
return_code, output = _call_crane_index(index_name=index_name, tags=tags)
if return_code:
logger.info(f"Failed to generate index {index_name}......")
logger.info(f"Error: {output}")
return False
logger.info(f"Generated index {index_name} successfully")
return True
| AuthTokenException |
python | doocs__leetcode | solution/1900-1999/1995.Count Special Quadruplets/Solution.py | {
"start": 0,
"end": 390
} | class ____:
def countQuadruplets(self, nums: List[int]) -> int:
ans, n = 0, len(nums)
for a in range(n - 3):
for b in range(a + 1, n - 2):
for c in range(b + 1, n - 1):
for d in range(c + 1, n):
if nums[a] + nums[b] + nums[c] == nums[d]:
ans += 1
return ans
| Solution |
python | walkccc__LeetCode | solutions/3146. Permutation Difference between Two Strings/3146.py | {
"start": 0,
"end": 187
} | class ____:
def findPermutationDifference(self, s: str, t: str) -> int:
indices = {c: i for i, c in enumerate(s)}
return sum([abs(indices[c] - i) for i, c in enumerate(t)])
| Solution |
python | kamyu104__LeetCode-Solutions | Python/check-if-grid-satisfies-conditions.py | {
"start": 41,
"end": 380
} | class ____(object):
def satisfiesConditions(self, grid):
"""
:type grid: List[List[int]]
:rtype: bool
"""
return (all(grid[i][j] == grid[i+1][j] for j in xrange(len(grid[0])) for i in xrange(len(grid)-1)) and
all(grid[0][j] != grid[0][j+1] for j in xrange(len(grid[0])-1)))
| Solution |
python | python__mypy | mypy/checkexpr.py | {
"start": 292003,
"end": 292720
} | class ____(types.BoolTypeQuery):
"""Query whether an argument type should be inferred in the second pass.
The result is True if the type has a type variable in a callable return
type anywhere. For example, the result for Callable[[], T] is True if t is
a type variable.
"""
def __init__(self) -> None:
super().__init__(types.ANY_STRATEGY)
def visit_callable_type(self, t: CallableType) -> bool:
# TODO: we need to check only for type variables of original callable.
return self.query_types(t.arg_types) or has_type_vars(t)
def has_erased_component(t: Type | None) -> bool:
return t is not None and t.accept(HasErasedComponentsQuery())
| ArgInferSecondPassQuery |
python | wandb__wandb | wandb/vendor/pygments/lexers/rdf.py | {
"start": 6304,
"end": 9398
} | class ____(RegexLexer):
"""
Lexer for `Turtle <http://www.w3.org/TR/turtle/>`_ data language.
.. versionadded:: 2.1
"""
name = 'Turtle'
aliases = ['turtle']
filenames = ['*.ttl']
mimetypes = ['text/turtle', 'application/x-turtle']
flags = re.IGNORECASE
patterns = {
'PNAME_NS': r'((?:[a-z][\w-]*)?\:)', # Simplified character range
'IRIREF': r'(<[^<>"{}|^`\\\x00-\x20]*>)'
}
# PNAME_NS PN_LOCAL (with simplified character range)
patterns['PrefixedName'] = r'%(PNAME_NS)s([a-z][\w-]*)' % patterns
tokens = {
'root': [
(r'\s+', Whitespace),
# Base / prefix
(r'(@base|BASE)(\s+)%(IRIREF)s(\s*)(\.?)' % patterns,
bygroups(Keyword, Whitespace, Name.Variable, Whitespace,
Punctuation)),
(r'(@prefix|PREFIX)(\s+)%(PNAME_NS)s(\s+)%(IRIREF)s(\s*)(\.?)' % patterns,
bygroups(Keyword, Whitespace, Name.Namespace, Whitespace,
Name.Variable, Whitespace, Punctuation)),
# The shorthand predicate 'a'
(r'(?<=\s)a(?=\s)', Keyword.Type),
# IRIREF
(r'%(IRIREF)s' % patterns, Name.Variable),
# PrefixedName
(r'%(PrefixedName)s' % patterns,
bygroups(Name.Namespace, Name.Tag)),
# Comment
(r'#[^\n]+', Comment),
(r'\b(true|false)\b', Literal),
(r'[+\-]?\d*\.\d+', Number.Float),
(r'[+\-]?\d*(:?\.\d+)?E[+\-]?\d+', Number.Float),
(r'[+\-]?\d+', Number.Integer),
(r'[\[\](){}.;,:^]', Punctuation),
(r'"""', String, 'triple-double-quoted-string'),
(r'"', String, 'single-double-quoted-string'),
(r"'''", String, 'triple-single-quoted-string'),
(r"'", String, 'single-single-quoted-string'),
],
'triple-double-quoted-string': [
(r'"""', String, 'end-of-string'),
(r'[^\\]+', String),
(r'\\', String, 'string-escape'),
],
'single-double-quoted-string': [
(r'"', String, 'end-of-string'),
(r'[^"\\\n]+', String),
(r'\\', String, 'string-escape'),
],
'triple-single-quoted-string': [
(r"'''", String, 'end-of-string'),
(r'[^\\]+', String),
(r'\\', String, 'string-escape'),
],
'single-single-quoted-string': [
(r"'", String, 'end-of-string'),
(r"[^'\\\n]+", String),
(r'\\', String, 'string-escape'),
],
'string-escape': [
(r'.', String, '#pop'),
],
'end-of-string': [
(r'(@)([a-z]+(:?-[a-z0-9]+)*)',
bygroups(Operator, Generic.Emph), '#pop:2'),
(r'(\^\^)%(IRIREF)s' % patterns, bygroups(Operator, Generic.Emph), '#pop:2'),
(r'(\^\^)%(PrefixedName)s' % patterns,
bygroups(Operator, Generic.Emph, Generic.Emph), '#pop:2'),
default('#pop:2'),
],
}
| TurtleLexer |
python | getsentry__sentry | src/sentry/monitors/processing_errors/errors.py | {
"start": 1865,
"end": 2104
} | class ____(TypedDict):
"""
We dropped a checkin due to invalid duration
"""
type: Literal[ProcessingErrorType.CHECKIN_INVALID_DURATION]
duration: str
"""
The user provided duration
"""
| CheckinInvalidDuration |
python | mlflow__mlflow | mlflow/types/responses_helpers.py | {
"start": 7227,
"end": 7293
} | class ____(BaseModel):
reasoning_tokens: int
| OutputTokensDetails |
python | doocs__leetcode | solution/1900-1999/1937.Maximum Number of Points with Cost/Solution.py | {
"start": 0,
"end": 518
} | class ____:
def maxPoints(self, points: List[List[int]]) -> int:
n = len(points[0])
f = points[0][:]
for p in points[1:]:
g = [0] * n
lmx = -inf
for j in range(n):
lmx = max(lmx, f[j] + j)
g[j] = max(g[j], p[j] + lmx - j)
rmx = -inf
for j in range(n - 1, -1, -1):
rmx = max(rmx, f[j] - j)
g[j] = max(g[j], p[j] + rmx + j)
f = g
return max(f)
| Solution |
python | pyca__cryptography | tests/hazmat/primitives/test_ec.py | {
"start": 2647,
"end": 2747
} | class ____(ec.EllipticCurve):
name = "dummy-curve"
key_size = 1
group_order = 1
| DummyCurve |
python | ipython__ipython | IPython/core/formatters.py | {
"start": 31355,
"end": 32958
} | class ____(BaseFormatter):
"""An escape-hatch Formatter for objects that know how to display themselves.
To define the callables that compute the representation of your
objects, define a :meth:`_ipython_display_` method or use the :meth:`for_type`
or :meth:`for_type_by_name` methods to register functions that handle
this. Unlike mime-type displays, this method should not return anything,
instead calling any appropriate display methods itself.
This display formatter has highest priority.
If it fires, no other display formatter will be called.
Prior to IPython 6.1, `_ipython_display_` was the only way to display custom mime-types
without registering a new Formatter.
IPython 6.1 introduces `_repr_mimebundle_` for displaying custom mime-types,
so `_ipython_display_` should only be used for objects that require unusual
display patterns, such as multiple display calls.
"""
print_method = ObjectName('_ipython_display_')
_return_type = (type(None), bool)
@catch_format_error
def __call__(self, obj):
"""Compute the format for an object."""
if self.enabled:
# lookup registered printer
try:
printer = self.lookup(obj)
except KeyError:
pass
else:
printer(obj)
return True
# Finally look for special method names
method = get_real_method(obj, self.print_method)
if method is not None:
method()
return True
| IPythonDisplayFormatter |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1529012,
"end": 1530631
} | class ____(Transform):
"""
StackTransform schema wrapper.
Parameters
----------
groupby : Sequence[str, :class:`FieldName`]
The data fields to group by.
stack : str, :class:`FieldName`
The field which is stacked.
offset : Literal['zero', 'center', 'normalize']
Mode for stacking marks. One of ``"zero"`` (default), ``"center"``, or
``"normalize"``. The ``"zero"`` offset will stack starting at ``0``. The
``"center"`` offset will center the stacks. The ``"normalize"`` offset will compute
percentage values for each stack point, with output values in the range ``[0,1]``.
**Default value:** ``"zero"``
sort : Sequence[dict, :class:`SortField`]
Field that determines the order of leaves in the stacked charts.
as : str, :class:`FieldName`, Sequence[str, :class:`FieldName`]
Output field names. This can be either a string or an array of strings with two
elements denoting the name for the fields for stack start and stack end
respectively. If a single string(e.g., ``"val"``) is provided, the end field will be
``"val_end"``.
"""
_schema = {"$ref": "#/definitions/StackTransform"}
def __init__(
self,
groupby: Optional[Sequence[str | SchemaBase]] = Undefined,
stack: Optional[str | SchemaBase] = Undefined,
offset: Optional[StackOffset_T] = Undefined,
sort: Optional[Sequence[SchemaBase | Map]] = Undefined,
**kwds,
):
super().__init__(groupby=groupby, stack=stack, offset=offset, sort=sort, **kwds)
| StackTransform |
python | falconry__falcon | tests/test_typing.py | {
"start": 727,
"end": 806
} | class ____(falcon.asgi.Request):
context_type = RichContext
| FancyAsyncRequest |
python | keon__algorithms | algorithms/graph/minimum_spanning_tree.py | {
"start": 126,
"end": 327
} | class ____:
"""
An edge of an undirected graph
"""
def __init__(self, source, target, weight):
self.source = source
self.target = target
self.weight = weight
| Edge |
python | kubernetes-client__python | kubernetes/client/models/v1_persistent_volume_claim_list.py | {
"start": 383,
"end": 7370
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1PersistentVolumeClaim]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1PersistentVolumeClaimList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1PersistentVolumeClaimList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1PersistentVolumeClaimList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1PersistentVolumeClaimList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1PersistentVolumeClaimList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1PersistentVolumeClaimList. # noqa: E501
items is a list of persistent volume claims. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims # noqa: E501
:return: The items of this V1PersistentVolumeClaimList. # noqa: E501
:rtype: list[V1PersistentVolumeClaim]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1PersistentVolumeClaimList.
items is a list of persistent volume claims. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims # noqa: E501
:param items: The items of this V1PersistentVolumeClaimList. # noqa: E501
:type: list[V1PersistentVolumeClaim]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1PersistentVolumeClaimList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1PersistentVolumeClaimList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1PersistentVolumeClaimList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1PersistentVolumeClaimList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1PersistentVolumeClaimList. # noqa: E501
:return: The metadata of this V1PersistentVolumeClaimList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1PersistentVolumeClaimList.
:param metadata: The metadata of this V1PersistentVolumeClaimList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PersistentVolumeClaimList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PersistentVolumeClaimList):
return True
return self.to_dict() != other.to_dict()
| V1PersistentVolumeClaimList |
python | huggingface__transformers | src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | {
"start": 37858,
"end": 40375
} | class ____(nn.Module):
"""
Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data
accordingly.
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10
self.default_scale = config.default_scale if hasattr(config, "default_scale") else None
def forward(
self, data: torch.Tensor, observed_indicator: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True)
num_observed = observed_indicator.sum(self.dim, keepdim=True)
scale = ts_sum / torch.clamp(num_observed, min=1)
# If `default_scale` is provided, we use it, otherwise we use the scale
# of the batch.
if self.default_scale is None:
batch_sum = ts_sum.sum(dim=0)
batch_observations = torch.clamp(num_observed.sum(0), min=1)
default_scale = torch.squeeze(batch_sum / batch_observations)
else:
default_scale = self.default_scale * torch.ones_like(scale)
# apply default scale where there are no observations
scale = torch.where(num_observed > 0, scale, default_scale)
# ensure the scale is at least `self.minimum_scale`
scale = torch.clamp(scale, min=self.minimum_scale)
scaled_data = data / scale
if not self.keepdim:
scale = scale.squeeze(dim=self.dim)
return scaled_data, torch.zeros_like(scale), scale
# Copied from transformers.models.patchtst.modeling_patchtst.PatchTSTNOPScaler with PatchTST->PatchTSMixer
| PatchTSMixerMeanScaler |
python | ansible__ansible | lib/ansible/module_utils/facts/packages.py | {
"start": 2284,
"end": 2743
} | class ____(PkgMgr):
LIB = None # type: str | None
def __init__(self):
self._lib = None
super(LibMgr, self).__init__()
def is_available(self, handle_exceptions=True):
found = False
try:
self._lib = __import__(self.LIB)
found = True
except ImportError:
if not handle_exceptions:
raise Exception(missing_required_lib(self.LIB))
return found
| LibMgr |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0053_make_external_builds_field_not_null.py | {
"start": 149,
"end": 766
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0052_migrate_null_external_builds_field"),
]
operations = [
migrations.AlterField(
model_name="project",
name="external_builds_enabled",
field=models.BooleanField(
default=False,
help_text='More information in <a href="https://docs.readthedocs.io/en/latest/guides/autobuild-docs-for-pull-requests.html">our docs</a>',
verbose_name="Build pull requests for this project",
),
),
]
| Migration |
python | huggingface__transformers | src/transformers/models/siglip2/processing_siglip2.py | {
"start": 1047,
"end": 1836
} | class ____(ProcessorMixin):
r"""
Constructs a Siglip2 processor which wraps a Siglip2 image processor and a Gemma tokenizer into a single processor.
[`Siglip2Processor`] offers all the functionalities of [`Siglip2ImageProcessor`] and [`GemmaTokenizerFast`]. See the
[`~Siglip2Processor.__call__`] and [`~Siglip2Processor.decode`] for more information.
Args:
image_processor ([`Siglip2ImageProcessor`]):
The image processor is a required input.
tokenizer ([`GemmaTokenizerFast`]):
The tokenizer is a required input.
"""
valid_processor_kwargs = Siglip2ProcessorKwargs
def __init__(self, image_processor, tokenizer):
super().__init__(image_processor, tokenizer)
__all__ = ["Siglip2Processor"]
| Siglip2Processor |
python | ray-project__ray | python/ray/autoscaler/v2/monitor.py | {
"start": 1637,
"end": 11193
} | class ____:
"""Autoscaling monitor.
This process periodically collects stats from the GCS and triggers
autoscaler updates.
TODO:
We should also handle autoscaler failures properly in the future.
Right now, we don't restart autoscaler if it fails (internal reconciliation
however, should not fail the autoscaler process).
With the Reconciler able to handle extra cloud instances, we could in fact
recover the autoscaler process from reconciliation.
"""
def __init__(
self,
address: str,
config_reader: IConfigReader,
log_dir: Optional[str] = None,
monitor_ip: Optional[str] = None,
):
# Record v2 usage (we do this as early as possible to capture usage)
record_autoscaler_v2_usage(GcsClient(address))
self.gcs_address = address
worker = ray._private.worker.global_worker
# TODO: eventually plumb ClusterID through to here
self.gcs_client = GcsClient(address=self.gcs_address)
if monitor_ip:
monitor_addr = build_address(monitor_ip, AUTOSCALER_METRIC_PORT)
self.gcs_client.internal_kv_put(
b"AutoscalerMetricsAddress", monitor_addr.encode(), True, None
)
self._session_name = self._get_session_name(self.gcs_client)
logger.info(f"session_name: {self._session_name}")
worker.set_mode(SCRIPT_MODE)
head_node_ip = parse_address(self.gcs_address)[0]
self.autoscaler = None
if log_dir:
try:
ray_event_logger = get_event_logger(
RayEvent.SourceType.AUTOSCALER, log_dir
)
self.event_logger = AutoscalerEventLogger(ray_event_logger)
except Exception:
self.event_logger = None
else:
self.event_logger = None
prom_metrics = AutoscalerPrometheusMetrics(session_name=self._session_name)
self.metric_reporter = AutoscalerMetricsReporter(prom_metrics)
if monitor_ip and prometheus_client:
# If monitor_ip wasn't passed in, then don't attempt to start the
# metric server to keep behavior identical to before metrics were
# introduced
try:
logger.info(
"Starting autoscaler metrics server on port {}".format(
AUTOSCALER_METRIC_PORT
)
)
kwargs = {"addr": "127.0.0.1"} if head_node_ip == "127.0.0.1" else {}
prometheus_client.start_http_server(
port=AUTOSCALER_METRIC_PORT,
registry=prom_metrics.registry,
**kwargs,
)
except Exception:
logger.exception(
"An exception occurred while starting the metrics server."
)
elif not prometheus_client:
logger.warning(
"`prometheus_client` not found, so metrics will not be exported."
)
self.autoscaler = Autoscaler(
session_name=self._session_name,
config_reader=config_reader,
gcs_client=self.gcs_client,
event_logger=self.event_logger,
metrics_reporter=self.metric_reporter,
)
@staticmethod
def _get_session_name(gcs_client: GcsClient) -> Optional[str]:
"""Obtain the session name from the GCS.
If the GCS doesn't respond, session name is considered None.
In this case, the metrics reported from the monitor won't have
the correct session name.
"""
session_name = gcs_client.internal_kv_get(
b"session_name",
ray_constants.KV_NAMESPACE_SESSION,
timeout=10,
)
if session_name:
session_name = session_name.decode()
return session_name
@staticmethod
def _report_autoscaling_state(
gcs_client: GcsClient, autoscaling_state: AutoscalingState
):
"""Report the autoscaling state to the GCS."""
try:
gcs_client.report_autoscaling_state(autoscaling_state.SerializeToString())
except Exception:
logger.exception("Error reporting autoscaling state to GCS.")
def _run(self):
"""Run the monitor loop."""
while True:
autoscaling_state = self.autoscaler.update_autoscaling_state()
if autoscaling_state:
# report autoscaling state
self._report_autoscaling_state(self.gcs_client, autoscaling_state)
else:
logger.warning("No autoscaling state to report.")
# Wait for a autoscaler update interval before processing the next
# round of messages.
time.sleep(AUTOSCALER_UPDATE_INTERVAL_S)
def run(self):
try:
self._run()
except Exception:
logger.exception("Error in monitor loop")
raise
def record_autoscaler_v2_usage(gcs_client: GcsClient) -> None:
"""
Record usage for autoscaler v2.
"""
try:
record_extra_usage_tag(TagKey.AUTOSCALER_VERSION, "v2", gcs_client)
except Exception:
logger.exception("Error recording usage for autoscaler v2.")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=("Parse GCS server for the monitor to connect to.")
)
parser.add_argument(
"--gcs-address", required=False, type=str, help="The address (ip:port) of GCS."
)
parser.add_argument(
"--autoscaling-config",
required=False,
type=str,
help="the path to the autoscaling config file",
)
parser.add_argument(
"--logging-level",
required=False,
type=str,
default=ray_constants.LOGGER_LEVEL,
choices=ray_constants.LOGGER_LEVEL_CHOICES,
help=ray_constants.LOGGER_LEVEL_HELP,
)
parser.add_argument(
"--logging-format",
required=False,
type=str,
default=ray_constants.LOGGER_FORMAT,
help=ray_constants.LOGGER_FORMAT_HELP,
)
parser.add_argument(
"--logging-filename",
required=False,
type=str,
default=ray_constants.MONITOR_LOG_FILE_NAME,
help="Specify the name of log file, "
"log to stdout if set empty, default is "
f'"{ray_constants.MONITOR_LOG_FILE_NAME}"',
)
parser.add_argument(
"--logs-dir",
required=True,
type=str,
help="Specify the path of the temporary directory used by Ray processes.",
)
parser.add_argument(
"--logging-rotate-bytes",
required=False,
type=int,
default=LOGGING_ROTATE_BYTES,
help="Specify the max bytes for rotating "
"log file, default is "
f"{LOGGING_ROTATE_BYTES} bytes.",
)
parser.add_argument(
"--logging-rotate-backup-count",
required=False,
type=int,
default=LOGGING_ROTATE_BACKUP_COUNT,
help="Specify the backup count of rotated log file, default is "
f"{LOGGING_ROTATE_BACKUP_COUNT}.",
)
parser.add_argument(
"--monitor-ip",
required=False,
type=str,
default=None,
help="The IP address of the machine hosting the monitor process.",
)
parser.add_argument(
"--stdout-filepath",
required=False,
type=str,
default="",
help="The filepath to dump monitor stdout.",
)
parser.add_argument(
"--stderr-filepath",
required=False,
type=str,
default="",
help="The filepath to dump monitor stderr.",
)
args = parser.parse_args()
# Disable log rotation for windows platform.
logging_rotation_bytes = args.logging_rotate_bytes if sys.platform != "win32" else 0
logging_rotation_backup_count = (
args.logging_rotate_backup_count if sys.platform != "win32" else 1
)
setup_component_logger(
logging_level=args.logging_level,
logging_format=args.logging_format,
log_dir=args.logs_dir,
filename=args.logging_filename,
max_bytes=logging_rotation_bytes,
backup_count=logging_rotation_backup_count,
)
# Setup stdout/stderr redirect files if redirection enabled.
logging_utils.redirect_stdout_stderr_if_needed(
args.stdout_filepath,
args.stderr_filepath,
logging_rotation_bytes,
logging_rotation_backup_count,
)
logger.info(
f"Starting autoscaler v2 monitor using ray installation: {ray.__file__}"
)
logger.info(f"Ray version: {ray.__version__}")
logger.info(f"Ray commit: {ray.__commit__}")
logger.info(f"AutoscalerMonitor started with command: {sys.argv}")
gcs_address = args.gcs_address
if gcs_address is None:
raise ValueError("--gcs-address must be set!")
if not args.autoscaling_config:
logger.info("No autoscaling config provided: use read only node provider.")
config_reader = ReadOnlyProviderConfigReader(gcs_address)
else:
autoscaling_config = os.path.expanduser(args.autoscaling_config)
config_reader = FileConfigReader(
config_file=autoscaling_config, skip_content_hash=True
)
monitor = AutoscalerMonitor(
gcs_address,
config_reader,
log_dir=args.logs_dir,
monitor_ip=args.monitor_ip,
)
monitor.run()
| AutoscalerMonitor |
python | getsentry__sentry | src/sentry/tsdb/dummy.py | {
"start": 390,
"end": 4957
} | class ____(BaseTSDB):
"""
A no-op time-series storage.
"""
def incr(self, model, key: TSDBKey, timestamp=None, count=1, environment_id=None):
self.validate_arguments([model], [environment_id])
def merge(self, model, destination, sources, timestamp=None, environment_ids=None):
self.validate_arguments([model], _environment_ids(environment_ids))
def delete(self, models, keys, start=None, end=None, timestamp=None, environment_ids=None):
self.validate_arguments(models, _environment_ids(environment_ids))
def get_range(
self,
model: TSDBModel,
keys: Sequence[TSDBKey],
start: datetime,
end: datetime,
rollup: int | None = None,
environment_ids: Sequence[int] | None = None,
conditions=None,
use_cache: bool = False,
jitter_value: int | None = None,
tenant_ids: dict[str, str | int] | None = None,
referrer_suffix: str | None = None,
group_on_time: bool = True,
aggregation_override: str | None = None,
project_ids: Sequence[int] | None = None,
) -> dict[TSDBKey, list[tuple[int, int]]]:
self.validate_arguments([model], _environment_ids(environment_ids))
_, series = self.get_optimal_rollup_series(start, end, rollup)
return {k: [(ts, 0) for ts in series] for k in keys}
def record(self, model, key, values, timestamp=None, environment_id=None):
self.validate_arguments([model], [environment_id])
def get_distinct_counts_series(
self,
model: TSDBModel,
keys: Sequence[int],
start: datetime,
end: datetime | None = None,
rollup: int | None = None,
environment_id: int | None = None,
tenant_ids: dict[str, str | int] | None = None,
project_ids: Sequence[int] | None = None,
) -> dict[int, list[tuple[int, Any]]]:
self.validate_arguments([model], [environment_id])
_, series = self.get_optimal_rollup_series(start, end, rollup)
return {k: [(ts, 0) for ts in series] for k in keys}
def get_distinct_counts_totals(
self,
model,
keys: Sequence[TSDBKey],
start,
end=None,
rollup=None,
environment_id=None,
use_cache=False,
jitter_value=None,
tenant_ids=None,
referrer_suffix=None,
conditions=None,
group_on_time: bool = False,
project_ids: Sequence[int] | None = None,
):
self.validate_arguments([model], [environment_id])
return {k: 0 for k in keys}
def merge_distinct_counts(
self, model, destination, sources, timestamp=None, environment_ids=None
):
self.validate_arguments([model], _environment_ids(environment_ids))
def delete_distinct_counts(
self, models, keys, start=None, end=None, timestamp=None, environment_ids=None
):
self.validate_arguments(models, _environment_ids(environment_ids))
def record_frequency_multi(
self,
requests: Sequence[tuple[TSDBModel, Mapping[str, Mapping[str, int | float]]]],
timestamp=None,
environment_id=None,
):
self.validate_arguments([model for model, request in requests], [environment_id])
def get_frequency_series(
self,
model: TSDBModel,
items: Mapping[TSDBKey, Sequence[TSDBItem]],
start: datetime,
end: datetime | None = None,
rollup: int | None = None,
environment_id: int | None = None,
tenant_ids: dict[str, str | int] | None = None,
project_ids: Sequence[int] | None = None,
) -> dict[TSDBKey, list[tuple[float, dict[TSDBItem, float]]]]:
self.validate_arguments([model], [environment_id])
rollup, series = self.get_optimal_rollup_series(start, end, rollup)
return {
key: [(timestamp, {k: 0.0 for k in members}) for timestamp in series]
for key, members in items.items()
}
def merge_frequencies(
self,
model: TSDBModel,
destination: str,
sources: Sequence[TSDBKey],
timestamp: datetime | None = None,
environment_ids: Iterable[int] | None = None,
) -> None:
self.validate_arguments([model], _environment_ids(environment_ids))
def delete_frequencies(
self, models, keys, start=None, end=None, timestamp=None, environment_ids=None
):
self.validate_arguments(models, _environment_ids(environment_ids))
def flush(self):
pass
| DummyTSDB |
python | faif__python-patterns | patterns/structural/3-tier.py | {
"start": 833,
"end": 2437
} | class ____:
"""UI interaction class"""
def __init__(self) -> None:
self.business_logic = BusinessLogic()
def get_product_list(self) -> None:
print("PRODUCT LIST:")
for product in self.business_logic.product_list():
print(product)
print("")
def get_product_information(self, product: str) -> None:
product_info = self.business_logic.product_information(product)
if product_info:
print("PRODUCT INFORMATION:")
print(
f"Name: {product.title()}, "
+ f"Price: {product_info.get('price', 0):.2f}, "
+ f"Quantity: {product_info.get('quantity', 0):}"
)
else:
print(f"That product '{product}' does not exist in the records")
def main():
"""
>>> ui = Ui()
>>> ui.get_product_list()
PRODUCT LIST:
(Fetching from Data Store)
milk
eggs
cheese
<BLANKLINE>
>>> ui.get_product_information("cheese")
(Fetching from Data Store)
PRODUCT INFORMATION:
Name: Cheese, Price: 2.00, Quantity: 10
>>> ui.get_product_information("eggs")
(Fetching from Data Store)
PRODUCT INFORMATION:
Name: Eggs, Price: 0.20, Quantity: 100
>>> ui.get_product_information("milk")
(Fetching from Data Store)
PRODUCT INFORMATION:
Name: Milk, Price: 1.50, Quantity: 10
>>> ui.get_product_information("arepas")
(Fetching from Data Store)
That product 'arepas' does not exist in the records
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| Ui |
python | plotly__plotly.py | plotly/animation.py | {
"start": 1395,
"end": 1616
} | class ____(NumberValidator):
def __init__(self, plotly_name="duration"):
super(DurationValidator, self).__init__(
plotly_name=plotly_name, parent_name="batch_animate", min=0
)
| DurationValidator |
python | prabhupant__python-ds | data_structures/palindromic_tree/palindromic_tree.py | {
"start": 16,
"end": 179
} | class ____:
def __init__(self):
self.next: typing.Dict[str, Node] = {}
self.frequency = 0
self.length = 0
self.suffix = None
| Node |
python | Farama-Foundation__Gymnasium | gymnasium/envs/mujoco/mujoco_rendering.py | {
"start": 24069,
"end": 29803
} | class ____:
"""This is the MuJoCo renderer manager class for every MuJoCo environment.
The class has two main public methods available:
- :meth:`render` - Renders the environment in three possible modes: "human", "rgb_array", or "depth_array"
- :meth:`close` - Closes all contexts initialized with the renderer
"""
def __init__(
self,
model: "mujoco.MjModel",
data: "mujoco.MjData",
default_cam_config: dict | None = None,
width: int | None = None,
height: int | None = None,
max_geom: int = 1000,
camera_id: int | None = None,
camera_name: str | None = None,
visual_options: dict[int, bool] = {},
):
"""A wrapper for clipping continuous actions within the valid bound.
Args:
model: MjModel data structure of the MuJoCo simulation
data: MjData data structure of the MuJoCo simulation
default_cam_config: dictionary with attribute values of the viewer's default camera, https://mujoco.readthedocs.io/en/latest/XMLreference.html?highlight=camera#visual-global
width: width of the OpenGL rendering context
height: height of the OpenGL rendering context
max_geom: maximum number of geometries to render
camera_id: The integer camera id from which to render the frame in the MuJoCo simulation
camera_name: The string name of the camera from which to render the frame in the MuJoCo simulation. This argument should not be passed if using cameara_id instead and vice versa
"""
self.model = model
self.data = data
self._viewers = {}
self.viewer = None
self.default_cam_config = default_cam_config
self.width = width
self.height = height
self.max_geom = max_geom
self._vopt = visual_options
# set self.camera_id using `camera_id` or `camera_name`
if camera_id is not None and camera_name is not None:
raise ValueError(
"Both `camera_id` and `camera_name` cannot be"
" specified at the same time."
)
no_camera_specified = camera_name is None and camera_id is None
if no_camera_specified:
camera_name = "track"
if camera_id is None:
self.camera_id = mujoco.mj_name2id(
self.model,
mujoco.mjtObj.mjOBJ_CAMERA,
camera_name,
)
else:
self.camera_id = camera_id
def render(
self,
render_mode: str | None,
):
"""Renders a frame of the simulation in a specific format and camera view.
Args:
render_mode: The format to render the frame, it can be: "human", "rgb_array", "depth_array", or "rgbd_tuple"
Returns:
If render_mode is "rgb_array" or "depth_array" it returns a numpy array in the specified format. "rgbd_tuple" returns a tuple of numpy arrays of the form (rgb, depth). "human" render mode does not return anything.
"""
if render_mode != "human":
assert (
self.width is not None and self.height is not None
), f"The width: {self.width} and height: {self.height} cannot be `None` when the render_mode is not `human`."
viewer = self._get_viewer(render_mode=render_mode)
if render_mode in ["rgb_array", "depth_array", "rgbd_tuple"]:
return viewer.render(render_mode=render_mode, camera_id=self.camera_id)
elif render_mode == "human":
return viewer.render()
def _get_viewer(self, render_mode: str | None):
"""Initializes and returns a viewer class depending on the render_mode
- `WindowViewer` class for "human" render mode
- `OffScreenViewer` class for "rgb_array", "depth_array", or "rgbd_tuple" render mode
"""
self.viewer = self._viewers.get(render_mode)
if self.viewer is None:
if render_mode == "human":
self.viewer = WindowViewer(
self.model,
self.data,
self.width,
self.height,
self.max_geom,
self._vopt,
)
elif render_mode in {"rgb_array", "depth_array", "rgbd_tuple"}:
self.viewer = OffScreenViewer(
self.model,
self.data,
self.width,
self.height,
self.max_geom,
self._vopt,
)
else:
raise AttributeError(
f"Unexpected mode: {render_mode}, expected modes: human, rgb_array, depth_array, or rgbd_tuple"
)
# Add default camera parameters
self._set_cam_config()
self._viewers[render_mode] = self.viewer
if len(self._viewers.keys()) > 1:
# Only one context can be current at a time
self.viewer.make_context_current()
return self.viewer
def _set_cam_config(self):
"""Set the default camera parameters"""
assert self.viewer is not None
if self.default_cam_config is not None:
for key, value in self.default_cam_config.items():
if isinstance(value, np.ndarray):
getattr(self.viewer.cam, key)[:] = value
else:
setattr(self.viewer.cam, key, value)
def close(self):
"""Close the OpenGL rendering contexts of all viewer modes"""
for _, viewer in self._viewers.items():
viewer.close()
| MujocoRenderer |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclass4.py | {
"start": 1015,
"end": 1054
} | class ____:
a: int = 0
@dataclass
| DC6 |
python | sphinx-doc__sphinx | sphinx/roles.py | {
"start": 8324,
"end": 9861
} | class ____(ReferenceRole):
_BASE_URL: Final = 'https://cwe.mitre.org/data/definitions/'
def run(self) -> tuple[list[Node], list[system_message]]:
target_id = f'index-{self.env.new_serialno("index")}'
entries = [
(
'single',
_('Common Weakness Enumeration; CWE %s') % self.target,
target_id,
'',
None,
)
]
index = addnodes.index(entries=entries)
target = nodes.target('', '', ids=[target_id])
self.inliner.document.note_explicit_target(target)
try:
refuri = self.build_uri()
reference = nodes.reference(
'', '', internal=False, refuri=refuri, classes=['cwe']
)
if self.has_explicit_title:
reference += nodes.strong(self.title, self.title)
else:
title = f'CWE {self.title}'
reference += nodes.strong(title, title)
except ValueError:
msg = self.inliner.reporter.error(
__('invalid CWE number %s') % self.target, line=self.lineno
)
prb = self.inliner.problematic(self.rawtext, self.rawtext, msg)
return [prb], [msg]
return [index, target, reference], []
def build_uri(self) -> str:
ret = self.target.partition('#')
if ret[1]:
return f'{CWE._BASE_URL}{int(ret[0])}.html#{ret[2]}'
return f'{CWE._BASE_URL}{int(ret[0])}.html'
| CWE |
python | ipython__ipython | IPython/core/prefilter.py | {
"start": 865,
"end": 2661
} | class ____(Exception):
pass
# RegExp to identify potential function names
re_fun_name = re.compile(r'[^\W\d]([\w.]*) *$')
# RegExp to exclude strings with this start from autocalling. In
# particular, all binary operators should be excluded, so that if foo is
# callable, foo OP bar doesn't become foo(OP bar), which is invalid. The
# characters '!=()' don't need to be checked for, as the checkPythonChars
# routine explicitly does so, to catch direct calls and rebindings of
# existing names.
# Warning: the '-' HAS TO BE AT THE END of the first group, otherwise
# it affects the rest of the group in square brackets.
re_exclude_auto = re.compile(r'^[,&^\|\*/\+-]'
r'|^is |^not |^in |^and |^or ')
# try to catch also methods for stuff in lists/tuples/dicts: off
# (experimental). For this to work, the line_split regexp would need
# to be modified so it wouldn't break things at '['. That line is
# nasty enough that I shouldn't change it until I can test it _well_.
#self.re_fun_name = re.compile (r'[a-zA-Z_]([a-zA-Z0-9_.\[\]]*) ?$')
# Handler Check Utilities
def is_shadowed(identifier, ip):
"""Is the given identifier defined in one of the namespaces which shadow
the alias and magic namespaces? Note that an identifier is different
than ifun, because it can not contain a '.' character."""
# This is much safer than calling ofind, which can change state
return (identifier in ip.user_ns \
or identifier in ip.user_global_ns \
or identifier in ip.ns_table['builtin']\
or iskeyword(identifier))
#-----------------------------------------------------------------------------
# Main Prefilter manager
#-----------------------------------------------------------------------------
| PrefilterError |
python | spyder-ide__spyder | spyder/plugins/projects/widgets/projectexplorer.py | {
"start": 4180,
"end": 6895
} | class ____(DirView):
"""Filtered file/directory tree view."""
def __init__(self, parent=None):
"""Initialize the filtered dir view."""
super().__init__(parent)
self.proxymodel = None
self.setup_proxy_model()
self.root_path = None
# ---- Model
def setup_proxy_model(self):
"""Setup proxy model."""
self.proxymodel = ProxyModel(self)
self.proxymodel.setSourceModel(self.fsmodel)
def install_model(self):
"""Install proxy model."""
if self.root_path is not None:
self.setModel(self.proxymodel)
def set_root_path(self, root_path):
"""
Set root path.
Parameters
----------
root_path: str
New path directory.
"""
self.root_path = root_path
self.install_model()
index = self.fsmodel.setRootPath(root_path)
self.proxymodel.setup_filter(self.root_path, [])
self.setRootIndex(self.proxymodel.mapFromSource(index))
def get_index(self, filename):
"""
Return index associated with filename.
Parameters
----------
filename: str
String with the filename.
"""
index = self.fsmodel.index(filename)
if index.isValid() and index.model() is self.fsmodel:
return self.proxymodel.mapFromSource(index)
def set_folder_names(self, folder_names):
"""
Set folder names
Parameters
----------
folder_names: list
List with the folder names.
"""
assert self.root_path is not None
path_list = [
osp.join(self.root_path, dirname) for dirname in folder_names
]
self.proxymodel.setup_filter(self.root_path, path_list)
self.itemDelegate().set_project_dir(self.proxymodel.path_list[0])
def get_filename(self, index):
"""
Return filename from index
Parameters
----------
index: int
Index of the list of filenames
"""
if index:
path = self.fsmodel.filePath(self.proxymodel.mapToSource(index))
return osp.normpath(str(path))
def setup_project_view(self):
"""Setup view for projects."""
for i in [1, 2, 3]:
self.hideColumn(i)
self.setHeaderHidden(True)
# ---- Events
def directory_clicked(self, dirname, index):
if index and index.isValid():
if self.get_conf('single_click_to_open'):
state = not self.isExpanded(index)
else:
state = self.isExpanded(index)
self.setExpanded(index, state)
| FilteredDirView |
python | networkx__networkx | networkx/classes/tests/test_reportviews.py | {
"start": 34955,
"end": 36337
} | class ____(TestDegreeView):
GRAPH = nx.MultiGraph
dview = nx.reportviews.MultiDegreeView
def test_str(self):
dv = self.dview(self.G)
rep = str([(0, 1), (1, 4), (2, 2), (3, 4), (4, 2), (5, 1)])
assert str(dv) == rep
dv = self.G.degree()
assert str(dv) == rep
def test_repr(self):
dv = self.G.degree()
rep = "MultiDegreeView({0: 1, 1: 4, 2: 2, 3: 4, 4: 2, 5: 1})"
assert repr(dv) == rep
def test_nbunch(self):
dv = self.dview(self.G)
dvn = dv(0)
assert dvn == 1
dvn = dv([2, 3])
assert sorted(dvn) == [(2, 2), (3, 4)]
def test_getitem(self):
dv = self.dview(self.G)
assert dv[0] == 1
assert dv[1] == 4
assert dv[2] == 2
assert dv[3] == 4
dv = self.dview(self.G, weight="foo")
assert dv[0] == 1
assert dv[1] == 7
assert dv[2] == 2
assert dv[3] == 7
def test_weight(self):
dv = self.dview(self.G)
dvw = dv(0, weight="foo")
assert dvw == 1
dvw = dv(1, weight="foo")
assert dvw == 7
dvw = dv([2, 3], weight="foo")
assert sorted(dvw) == [(2, 2), (3, 7)]
dvd = dict(dv(weight="foo"))
assert dvd[0] == 1
assert dvd[1] == 7
assert dvd[2] == 2
assert dvd[3] == 7
| TestMultiDegreeView |
python | pyqtgraph__pyqtgraph | pyqtgraph/SRTTransform.py | {
"start": 131,
"end": 5181
} | class ____(QtGui.QTransform):
"""Transform that can always be represented as a combination of 3 matrices: scale * rotate * translate
This transform has no shear; angles are always preserved.
"""
def __init__(self, init=None):
QtGui.QTransform.__init__(self)
self.reset()
if init is None:
return
elif isinstance(init, dict):
self.restoreState(init)
elif isinstance(init, SRTTransform):
self._state = {
'pos': Point(init._state['pos']),
'scale': Point(init._state['scale']),
'angle': init._state['angle']
}
self.update()
elif isinstance(init, QtGui.QTransform):
self.setFromQTransform(init)
elif isinstance(init, QtGui.QMatrix4x4):
self.setFromMatrix4x4(init)
else:
raise Exception("Cannot create SRTTransform from input type: %s" % str(type(init)))
def getScale(self):
return self._state['scale']
def getRotation(self):
return self._state['angle']
def getTranslation(self):
return self._state['pos']
def reset(self):
self._state = {
'pos': Point(0,0),
'scale': Point(1,1),
'angle': 0.0 ## in degrees
}
self.update()
def setFromQTransform(self, tr):
p1 = Point(tr.map(0., 0.))
p2 = Point(tr.map(1., 0.))
p3 = Point(tr.map(0., 1.))
dp2 = Point(p2-p1)
dp3 = Point(p3-p1)
## detect flipped axes
if dp2.angle(dp3, units="radians") > 0:
da = 0
sy = -1.0
else:
da = 0
sy = 1.0
self._state = {
'pos': Point(p1),
'scale': Point(dp2.length(), dp3.length() * sy),
'angle': degrees(atan2(dp2[1], dp2[0])) + da
}
self.update()
def setFromMatrix4x4(self, m):
m = SRTTransform3D.SRTTransform3D(m)
angle, axis = m.getRotation()
if angle != 0 and (axis[0] != 0 or axis[1] != 0 or axis[2] != 1):
print("angle: %s axis: %s" % (str(angle), str(axis)))
raise Exception("Can only convert 4x4 matrix to 3x3 if rotation is around Z-axis.")
self._state = {
'pos': Point(m.getTranslation()),
'scale': Point(m.getScale()),
'angle': angle
}
self.update()
def translate(self, *args):
"""Acceptable arguments are:
x, y
[x, y]
Point(x,y)"""
t = Point(*args)
self.setTranslate(self._state['pos']+t)
def setTranslate(self, *args):
"""Acceptable arguments are:
x, y
[x, y]
Point(x,y)"""
self._state['pos'] = Point(*args)
self.update()
def scale(self, *args):
"""Acceptable arguments are:
x, y
[x, y]
Point(x,y)"""
s = Point(*args)
self.setScale(self._state['scale'] * s)
def setScale(self, *args):
"""Acceptable arguments are:
x, y
[x, y]
Point(x,y)"""
self._state['scale'] = Point(*args)
self.update()
def rotate(self, angle):
"""Rotate the transformation by angle (in degrees)"""
self.setRotate(self._state['angle'] + angle)
def setRotate(self, angle):
"""Set the transformation rotation to angle (in degrees)"""
self._state['angle'] = angle
self.update()
def __truediv__(self, t):
"""A / B == B^-1 * A"""
dt = t.inverted()[0] * self
return SRTTransform(dt)
def __div__(self, t):
return self.__truediv__(t)
def __mul__(self, t):
return SRTTransform(QtGui.QTransform.__mul__(self, t))
def saveState(self):
p = self._state['pos']
s = self._state['scale']
return {'pos': (p[0], p[1]), 'scale': (s[0], s[1]), 'angle': self._state['angle']}
def __reduce__(self):
return SRTTransform, (self.saveState(),)
def restoreState(self, state):
self._state['pos'] = Point(state.get('pos', (0,0)))
self._state['scale'] = Point(state.get('scale', (1.,1.)))
self._state['angle'] = state.get('angle', 0)
self.update()
def update(self):
QtGui.QTransform.reset(self)
## modifications to the transform are multiplied on the right, so we need to reverse order here.
QtGui.QTransform.translate(self, *self._state['pos'])
QtGui.QTransform.rotate(self, self._state['angle'])
QtGui.QTransform.scale(self, *self._state['scale'])
def __repr__(self):
return str(self.saveState())
def matrix(self):
return np.array([[self.m11(), self.m12(), self.m13()],[self.m21(), self.m22(), self.m23()],[self.m31(), self.m32(), self.m33()]])
| SRTTransform |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataform.py | {
"start": 33537,
"end": 36835
} | class ____(GoogleCloudBaseOperator):
"""
Writes new file to specified workspace.
:param project_id: Required. The ID of the Google Cloud project where workspace located.
:param region: Required. The ID of the Google Cloud region where workspace located.
:param repository_id: Required. The ID of the Dataform repository where workspace located.
:param workspace_id: Required. The ID of the Dataform workspace where files should be created.
:param filepath: Required. Path to file including name of the file relative to workspace root.
:param contents: Required. Content of the file to be written.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"project_id",
"region",
"repository_id",
"workspace_id",
"impersonation_chain",
)
def __init__(
self,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
filepath: str,
contents: bytes,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.project_id = project_id
self.region = region
self.repository_id = repository_id
self.workspace_id = workspace_id
self.filepath = filepath
self.contents = contents
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> dict:
hook = DataformHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
write_file_response = hook.write_file(
project_id=self.project_id,
region=self.region,
repository_id=self.repository_id,
workspace_id=self.workspace_id,
filepath=self.filepath,
contents=self.contents,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return WriteFileResponse.to_dict(write_file_response)
| DataformWriteFileOperator |
python | readthedocs__readthedocs.org | readthedocs/api/v3/views.py | {
"start": 20337,
"end": 21364
} | class ____(
APIv3Settings, RemoteQuerySetMixin, FlexFieldsMixin, ListModelMixin, GenericViewSet
):
model = RemoteRepository
serializer_class = RemoteRepositorySerializer
filterset_class = RemoteRepositoryFilter
permission_classes = (IsAuthenticated,)
permit_list_expands = ["remote_organization"]
def get_queryset(self):
queryset = (
super()
.get_queryset()
.annotate(
# This field will be used by the serializer.
_admin=Exists(
RemoteRepositoryRelation.objects.filter(
remote_repository=OuterRef("pk"),
user=self.request.user,
admin=True,
)
)
)
)
if is_expanded(self.request, "remote_organization"):
queryset = queryset.select_related("organization")
return queryset.order_by("organization__name", "full_name").distinct()
| RemoteRepositoryViewSet |
python | getsentry__sentry | src/sentry/rules/history/backends/postgres.py | {
"start": 1449,
"end": 8169
} | class ____(RuleHistoryBackend):
def record(
self,
rule: Rule,
group: Group,
event_id: str | None = None,
notification_uuid: str | None = None,
) -> RuleFireHistory | None:
return RuleFireHistory.objects.create(
project=rule.project,
rule=rule,
group=group,
event_id=event_id,
notification_uuid=notification_uuid,
)
def fetch_rule_groups_paginated(
self,
rule: Rule,
start: datetime,
end: datetime,
cursor: Cursor | None = None,
per_page: int = 25,
) -> CursorResult[RuleGroupHistory]:
try:
alert_rule_workflow = AlertRuleWorkflow.objects.get(rule_id=rule.id)
workflow = alert_rule_workflow.workflow
# Performs the raw SQL query with pagination
def data_fn(offset: int, limit: int) -> list[_Result]:
query = """
WITH combined_data AS (
SELECT group_id, date_added, event_id
FROM sentry_rulefirehistory
WHERE rule_id = %s AND date_added >= %s AND date_added < %s
UNION ALL
SELECT group_id, date_added, event_id
FROM workflow_engine_workflowfirehistory
WHERE workflow_id = %s
AND date_added >= %s AND date_added < %s
)
SELECT
group_id as group,
COUNT(*) as count,
MAX(date_added) as last_triggered,
(ARRAY_AGG(event_id ORDER BY date_added DESC))[1] as event_id
FROM combined_data
GROUP BY group_id
ORDER BY count DESC, last_triggered DESC
LIMIT %s OFFSET %s
"""
with connection.cursor() as cursor:
cursor.execute(
query, [rule.id, start, end, workflow.id, start, end, limit, offset]
)
return [
_Result(
group=row[0],
count=row[1],
last_triggered=row[2],
event_id=row[3],
)
for row in cursor.fetchall()
]
result = GenericOffsetPaginator(data_fn=data_fn).get_result(per_page, cursor)
result.results = convert_results(result.results)
return result
except AlertRuleWorkflow.DoesNotExist:
# If no workflow is associated with this rule, just use the original behavior
logger.exception("No workflow associated with rule", extra={"rule_id": rule.id})
pass
rule_filtered_history = RuleFireHistory.objects.filter(
rule=rule,
date_added__gte=start,
date_added__lt=end,
)
# subquery that retrieves row with the largest date in a group for RuleFireHistory
rule_group_max_dates = rule_filtered_history.filter(group=OuterRef("group")).order_by(
"-date_added"
)[:1]
qs = (
rule_filtered_history.select_related("group")
.values("group")
.annotate(count=Count("group"))
.annotate(event_id=NoGroupBySubquery(rule_group_max_dates.values("event_id")))
.annotate(last_triggered=Max("date_added"))
)
return OffsetPaginator(
qs, order_by=("-count", "-last_triggered"), on_results=convert_results
).get_result(per_page, cursor)
def fetch_rule_hourly_stats(
self, rule: Rule, start: datetime, end: datetime
) -> Sequence[TimeSeriesValue]:
start = start.replace(tzinfo=timezone.utc)
end = end.replace(tzinfo=timezone.utc)
existing_data: dict[datetime, TimeSeriesValue] = {}
try:
alert_rule_workflow = AlertRuleWorkflow.objects.get(rule_id=rule.id)
workflow = alert_rule_workflow.workflow
# Use raw SQL to combine data from both tables
with connection.cursor() as db_cursor:
db_cursor.execute(
"""
SELECT
DATE_TRUNC('hour', date_added) as bucket,
COUNT(*) as count
FROM (
SELECT date_added
FROM sentry_rulefirehistory
WHERE rule_id = %s
AND date_added >= %s
AND date_added < %s
UNION ALL
SELECT date_added
FROM workflow_engine_workflowfirehistory
WHERE workflow_id = %s
AND date_added >= %s
AND date_added < %s
) combined_data
GROUP BY DATE_TRUNC('hour', date_added)
ORDER BY bucket
""",
[rule.id, start, end, workflow.id, start, end],
)
results = db_cursor.fetchall()
# Convert raw SQL results to the expected format
existing_data = {row[0]: TimeSeriesValue(row[0], row[1]) for row in results}
except AlertRuleWorkflow.DoesNotExist:
# If no workflow is associated with this rule, just use the original behavior
logger.exception("No workflow associated with rule", extra={"rule_id": rule.id})
pass
if not existing_data:
qs = (
RuleFireHistory.objects.filter(
rule=rule,
date_added__gte=start,
date_added__lt=end,
)
.annotate(bucket=TruncHour("date_added"))
.order_by("bucket")
.values("bucket")
.annotate(count=Count("id"))
)
existing_data = {
row["bucket"]: TimeSeriesValue(row["bucket"], row["count"]) for row in qs
}
# Fill in gaps with zero values for missing hours
results = []
current = start.replace(minute=0, second=0, microsecond=0) + timedelta(hours=1)
while current <= end.replace(minute=0, second=0, microsecond=0):
results.append(existing_data.get(current, TimeSeriesValue(current, 0)))
current += timedelta(hours=1)
return results
| PostgresRuleHistoryBackend |
python | getsentry__sentry | src/sentry/api/bases/organizationmember.py | {
"start": 1840,
"end": 1975
} | class ____(StaffPermissionMixin, MemberPermission):
"""Allows staff to access member endpoints."""
pass
| MemberAndStaffPermission |
python | openai__openai-python | src/openai/resources/realtime/realtime.py | {
"start": 37422,
"end": 40025
} | class ____(BaseAsyncRealtimeConnectionResource):
async def clear(self, *, event_id: str | Omit = omit) -> None:
"""Send this event to clear the audio bytes in the buffer.
The server will
respond with an `input_audio_buffer.cleared` event.
"""
await self._connection.send(
cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.clear", "event_id": event_id}))
)
async def commit(self, *, event_id: str | Omit = omit) -> None:
"""
Send this event to commit the user input audio buffer, which will create a new user message item in the conversation. This event will produce an error if the input audio buffer is empty. When in Server VAD mode, the client does not need to send this event, the server will commit the audio buffer automatically.
Committing the input audio buffer will trigger input audio transcription (if enabled in session configuration), but it will not create a response from the model. The server will respond with an `input_audio_buffer.committed` event.
"""
await self._connection.send(
cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.commit", "event_id": event_id}))
)
async def append(self, *, audio: str, event_id: str | Omit = omit) -> None:
"""Send this event to append audio bytes to the input audio buffer.
The audio
buffer is temporary storage you can write to and later commit. A "commit" will create a new
user message item in the conversation history from the buffer content and clear the buffer.
Input audio transcription (if enabled) will be generated when the buffer is committed.
If VAD is enabled the audio buffer is used to detect speech and the server will decide
when to commit. When Server VAD is disabled, you must commit the audio buffer
manually. Input audio noise reduction operates on writes to the audio buffer.
The client may choose how much audio to place in each event up to a maximum
of 15 MiB, for example streaming smaller chunks from the client may allow the
VAD to be more responsive. Unlike most other client events, the server will
not send a confirmation response to this event.
"""
await self._connection.send(
cast(
RealtimeClientEventParam,
strip_not_given({"type": "input_audio_buffer.append", "audio": audio, "event_id": event_id}),
)
)
| AsyncRealtimeInputAudioBufferResource |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/publish/context.py | {
"start": 642,
"end": 743
} | class ____(Enum):
ROLLBACK = "Rollback"
PUBLISH = "Publish"
PROMOTE = "Promote"
| RolloutMode |
python | dask__dask | dask/dataframe/dask_expr/_accessor.py | {
"start": 3076,
"end": 3233
} | class ____(PropertyMap):
def _divisions(self):
# TODO: We can do better here
return (None,) * (self.frame.npartitions + 1)
| PropertyMapIndex |
python | readthedocs__readthedocs.org | readthedocs/oauth/migrations/0010_index_full_name.py | {
"start": 149,
"end": 543
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("oauth", "0009_add_missing_model_change_migrations"),
]
operations = [
migrations.AlterField(
model_name="remoterepository",
name="full_name",
field=models.CharField(db_index=True, max_length=255, verbose_name="Full Name"),
),
]
| Migration |
python | getsentry__sentry | src/sentry/core/endpoints/organization_projects.py | {
"start": 8288,
"end": 8790
} | class ____(OrganizationEndpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get(self, request: Request, organization: Organization) -> Response:
queryset = Project.objects.filter(organization=organization)
all_projects = queryset.count()
my_projects = queryset.filter(teams__organizationmember__user_id=request.user.id).count()
return Response({"allProjects": all_projects, "myProjects": my_projects})
| OrganizationProjectsCountEndpoint |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/duplicate_bases.py | {
"start": 352,
"end": 418
} | class ____(
A,
A,
B,
):
...
###
# Non-errors.
###
| G4 |
python | dask__distributed | distributed/comm/inproc.py | {
"start": 8786,
"end": 9929
} | class ____(Connector):
def __init__(self, manager):
self.manager = manager
async def connect(self, address, deserialize=True, **connection_args):
listener = self.manager.get_listener_for(address)
if listener is None:
raise OSError(f"no endpoint for inproc address {address!r}")
conn_req = ConnectionRequest(
c2s_q=Queue(),
s2c_q=Queue(),
c_loop=IOLoop.current(),
c_addr=self.manager.new_address(),
conn_event=asyncio.Event(),
)
listener.connect_threadsafe(conn_req)
# Wait for connection acknowledgement
# (do not pretend we're connected if the other comm never gets
# created, for example if the listener was stopped in the meantime)
await conn_req.conn_event.wait()
comm = InProc(
local_addr="inproc://" + conn_req.c_addr,
peer_addr="inproc://" + address,
read_q=conn_req.s2c_q,
write_q=conn_req.c2s_q,
write_loop=listener.loop,
deserialize=deserialize,
)
return comm
| InProcConnector |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_print_area02.py | {
"start": 315,
"end": 1188
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("print_area02.xlsx")
self.ignore_files = [
"xl/printerSettings/printerSettings1.bin",
"xl/worksheets/_rels/sheet1.xml.rels",
]
self.ignore_elements = {
"[Content_Types].xml": ['<Default Extension="bin"'],
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"],
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with a print area."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.print_area("A1:G1")
worksheet.write("A1", "Foo")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | tensorflow__tensorflow | tensorflow/python/autograph/tests/call_to_print_function_test.py | {
"start": 1338,
"end": 2554
} | class ____(reference_test_base.TestCase):
def setUp(self):
super(ReferenceTest, self).setUp()
self.autograph_opts = tf.autograph.experimental.Feature.BUILTIN_FUNCTIONS
def test_lone_print(self):
self.assertFunctionMatchesEager(lone_print, 1)
self.assertFunctionMatchesEager(lone_print, np.array([1, 2, 3]))
def test_print_multiple_values(self):
self.assertFunctionMatchesEager(print_multiple_values, 1)
self.assertFunctionMatchesEager(print_multiple_values, np.array([1, 2, 3]))
def test_multiple_prints(self):
self.assertFunctionMatchesEager(multiple_prints, 1, 2)
self.assertFunctionMatchesEager(multiple_prints, np.array([1, 2, 3]), 4)
def test_print_with_nontf_values(self):
self.assertFunctionMatchesEager(print_with_nontf_values, 1)
self.assertFunctionMatchesEager(print_with_nontf_values, np.array([1, 2,
3]))
def test_print_in_cond(self):
self.assertFunctionMatchesEager(print_in_cond, 0)
self.assertFunctionMatchesEager(print_in_cond, 1)
def test_tf_print(self):
self.assertFunctionMatchesEager(tf_print, 0)
if __name__ == '__main__':
tf.test.main()
| ReferenceTest |
python | google__jax | tests/pallas/tpu_gmm_test.py | {
"start": 5091,
"end": 11077
} | class ____(jtu.JaxTestCase):
def setUp(self):
if not jtu.test_device_matches(["tpu"]):
self.skipTest("Test requires TPU device.")
super().setUp()
self.key = jax.random.PRNGKey(1234)
def assert_allclose(
self,
out: jnp.ndarray,
expected_out: jnp.ndarray,
*,
atol: float = 1e-5,
rtol: float = 1e-5,
):
self.assertEqual(out.dtype, expected_out.dtype)
np.testing.assert_allclose(
out.astype(jnp.float32),
expected_out.astype(jnp.float32),
atol=atol,
rtol=rtol,
)
def gmm_test(
self,
m: int,
k: int,
n: int,
data: hps.SearchStrategy[hps.DataObject],
interpret: bool = False,
):
seed = data.draw(seed_strategy())
num_groups, _ = data.draw(group_strategy(max_stride=1))
lhs_dtype, rhs_dtype, out_dtype = (
data.draw(hps.sampled_from([jnp.float32, jnp.bfloat16]))
for _ in range(3)
)
transpose_rhs = data.draw(hps.booleans())
key = jax.random.key(seed)
k1, k2 = jax.random.split(key, 2)
lhs = random_dense((m, k), k1, lhs_dtype, limit=1)
rhs = random_dense((num_groups, k, n), k2, rhs_dtype, limit=1)
group_sizes = data.draw(group_sizes_strategy(m=m, num_groups=num_groups))
out, vjpfun = jax.vjp(
partial(
mblx.gmm,
preferred_element_type=out_dtype,
transpose_rhs=transpose_rhs,
interpret=interpret,
),
lhs,
rhs.swapaxes(1, 2) if transpose_rhs else rhs,
group_sizes,
)
def reference_fn(lhs, rhs, group_sizes, preferred_element_type):
rhs = rhs.swapaxes(1, 2) if transpose_rhs else rhs
return reference_gmm(
lhs, rhs, group_sizes, preferred_element_type=preferred_element_type
)
expected_out, reference_vjpfun = jax.vjp(
partial(reference_fn, preferred_element_type=out_dtype),
lhs,
rhs.swapaxes(1, 2) if transpose_rhs else rhs,
group_sizes,
)
self.assertEqual(out.dtype, out_dtype)
self.assertEqual(expected_out.dtype, out_dtype)
atol, rtol = tolerances(lhs_dtype, rhs_dtype, out_dtype)
self.assert_allclose(out, expected_out, atol=atol, rtol=rtol)
cotangent = random_dense((m, n), k1, out_dtype, limit=1)
grad_lhs, grad_rhs, *_ = vjpfun(cotangent)
expected_grad_lhs, expected_grad_rhs, *_ = reference_vjpfun(cotangent)
self.assert_allclose(grad_lhs, expected_grad_lhs, atol=atol, rtol=rtol)
self.assert_allclose(grad_rhs, expected_grad_rhs, atol=atol, rtol=rtol)
@parameterized.parameters(*GROUPED_MATMUL_TESTS)
@hp.given(hps.data())
def test_gmm(
self,
m: int,
k: int,
n: int,
data: hps.SearchStrategy[hps.DataObject],
):
self.gmm_test(m, k, n, data)
# NOTE: Run fewer tests with interpret mode. We just want to sanity check that
# changes do not break running these kernels with interpret=True.
@parameterized.parameters(*GROUPED_MATMUL_TESTS[0:1])
@hp.given(hps.data())
def test_gmm_interpret(
self,
m: int,
k: int,
n: int,
data: hps.SearchStrategy[hps.DataObject],
):
self.skipTest("interpret mode with dynamic grids is unsupported")
self.gmm_test(
m,
k,
n,
data=data,
interpret=True,
)
@parameterized.parameters(*GROUPED_MATMUL_TESTS)
@hp.given(hps.data())
def test_gmm_sharded_groups(
self,
m: int,
k: int,
n: int,
data: hps.SearchStrategy[hps.DataObject],
):
seed = data.draw(seed_strategy())
num_groups, group_stride = data.draw(group_strategy())
lhs_dtype, rhs_dtype, out_dtype = (
data.draw(hps.sampled_from([jnp.float32, jnp.bfloat16]))
for _ in range(3)
)
key = jax.random.key(seed)
k1, k2 = jax.random.split(key, 2)
lhs = random_dense((m, k), k1, lhs_dtype, limit=1)
rhs = random_dense((num_groups, k, n), k2, rhs_dtype, limit=1)
group_sizes = data.draw(group_sizes_strategy(m=m, num_groups=num_groups))
out, shard_vjpfun = jax.vjp(
partial(mblx.gmm, preferred_element_type=out_dtype),
lhs,
rhs[0:group_stride],
group_sizes,
)
vjpfuns = [shard_vjpfun]
for group_offset in range(group_stride, num_groups, group_stride):
out, shard_vjpfun = jax.vjp(
lambda lhs, rhs, group_sizes, out: mblx.gmm(
lhs,
rhs,
group_sizes,
out_dtype,
group_offset=jnp.array(group_offset, dtype=jnp.int32), # pylint: disable=cell-var-from-loop
existing_out=out,
),
lhs,
rhs[group_offset : group_offset + group_stride],
group_sizes,
out,
)
vjpfuns.append(shard_vjpfun)
expected_out, reference_vjpfun = jax.vjp(
partial(reference_gmm, preferred_element_type=out_dtype),
lhs,
rhs,
group_sizes,
)
self.assertEqual(out.dtype, out_dtype)
self.assertEqual(expected_out.dtype, out_dtype)
atol, rtol = tolerances(lhs_dtype, rhs_dtype, out_dtype)
self.assert_allclose(out, expected_out, atol=atol, rtol=rtol)
cotangent = random_dense((m, n), k1, out_dtype, limit=1)
shard_grad_lhs, shard_grad_rhs, *_ = vjpfuns[0](cotangent)
grad_lhs = shard_grad_lhs
grad_rhs = [shard_grad_rhs]
for i, group_offset in enumerate(
range(group_stride, num_groups, group_stride)
):
shard_grad_lhs, shard_grad_rhs, *_ = vjpfuns[i + 1](cotangent)
grad_lhs += shard_grad_lhs
grad_rhs.append(shard_grad_rhs)
grad_rhs = jnp.concatenate(grad_rhs, axis=0)
expected_grad_lhs, expected_grad_rhs, *_ = reference_vjpfun(cotangent)
self.assert_allclose(grad_lhs, expected_grad_lhs, atol=atol, rtol=rtol)
self.assert_allclose(grad_rhs, expected_grad_rhs, atol=atol, rtol=rtol)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| GroupedMatmulTest |
python | astropy__astropy | astropy/units/tests/test_quantity_non_ufuncs.py | {
"start": 18581,
"end": 20648
} | class ____:
def test_put(self):
q = np.arange(3.0) * u.m
np.put(q, [0, 2], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
def test_putmask(self):
q = np.arange(3.0) * u.m
mask = [True, False, True]
values = [50, 0, 150] * u.cm
np.putmask(q, mask, values)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
with pytest.raises(u.UnitsError):
np.putmask(q, mask, values.value)
with pytest.raises(u.UnitsError):
np.putmask(q.value, mask, values)
a = np.arange(3.0)
values = [50, 0, 150] * u.percent
np.putmask(a, mask, values)
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
def test_place(self):
q = np.arange(3.0) * u.m
np.place(q, [True, False, True], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.0)
np.place(a, [True, False, True], [50, 150] * u.percent)
assert type(a) is np.ndarray
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
def test_copyto(self):
q = np.arange(3.0) * u.m
np.copyto(q, [50, 0, 150] * u.cm, where=[True, False, True])
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.0)
np.copyto(a, [50, 0, 150] * u.percent, where=[True, False, True])
assert type(a) is np.ndarray
expected = np.array([0.5, 1.0, 1.5])
assert np.all(a == expected)
def test_fill_diagonal(self):
q = np.arange(9.0).reshape(3, 3) * u.m
expected = q.value.copy()
np.fill_diagonal(expected, 0.25)
expected = expected * u.m
np.fill_diagonal(q, 25.0 * u.cm)
assert q.unit == u.m
assert np.all(q == expected)
| TestSettingParts |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_grad_acc.py | {
"start": 1838,
"end": 2284
} | class ____:
"""
This wraps a :class:`list` of :class:`_GradAccConfig` instances with the
sole purpose of overriding :meth:`__repr__` to remove spaces.
"""
configs: list[_GradAccConfig]
def __repr__(self) -> str:
# Override to remove any spaces in the string to appease the internal
# build's test name parser
return "[" + ",".join(config.__repr__() for config in self.configs) + "]"
| _GradAccConfigs |
python | tox-dev__tox | src/tox/config/loader/api.py | {
"start": 523,
"end": 1680
} | class ____: # noqa: PLW1641
"""An override for config definitions."""
def __init__(self, value: str) -> None:
key, equal, self.value = value.partition("=")
if not equal:
msg = f"override {value} has no = sign in it"
raise ArgumentTypeError(msg)
self.append = False
if key.endswith("+"): # key += value appends to a list
key = key[:-1]
self.append = True
self.namespace, _, self.key = key.rpartition(".")
def __repr__(self) -> str:
return f"{self.__class__.__name__}('{self}')"
def __str__(self) -> str:
return f"{self.namespace}{'.' if self.namespace else ''}{self.key}={self.value}"
def __eq__(self, other: object) -> bool:
if type(self) != type(other): # noqa: E721
return False
return (self.namespace, self.key, self.value) == (
other.namespace, # type: ignore[attr-defined]
other.key, # type: ignore[attr-defined]
other.value, # type: ignore[attr-defined]
)
def __ne__(self, other: object) -> bool:
return not (self == other)
| Override |
python | google__jax | tests/array_interoperability_test.py | {
"start": 4775,
"end": 10079
} | class ____(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if not jtu.test_device_matches(["cpu", "gpu"]):
self.skipTest(f"DLPack not supported on {jtu.device_under_test()}")
@jtu.sample_product(
shape=all_shapes,
dtype=dlpack_dtypes + extra_dlpack_dtypes,
copy=[False, True, None],
use_stream=[False, True],
)
@jtu.run_on_devices("gpu")
def testJaxRoundTrip(self, shape, dtype, copy, use_stream):
rng = jtu.rand_default(self.rng())
np = rng(shape, dtype)
# Check if the source device is preserved
x = jax.device_put(np, jax.devices("cpu")[0])
device = jax.devices("gpu")[0]
y = jax.device_put(x, device)
# TODO(parkers): Remove after setting 'stream' properly below.
jax.block_until_ready(y)
z = jax.dlpack.from_dlpack(y)
self.assertEqual(z.devices(), {device})
self.assertAllClose(np.astype(x.dtype), z)
@jtu.sample_product(
shape=all_shapes,
dtype=dlpack_dtypes,
gpu=[False, True],
)
def testJaxArrayRoundTrip(self, shape, dtype, gpu):
rng = jtu.rand_default(self.rng())
np = rng(shape, dtype)
if gpu and jax.default_backend() == "cpu":
raise unittest.SkipTest("Skipping GPU test case on CPU")
device = jax.devices("gpu" if gpu else "cpu")[0]
x = jax.device_put(np, device)
# TODO(parkers): Remove after setting 'stream' properly.
jax.block_until_ready(x)
y = jax.dlpack.from_dlpack(x)
self.assertEqual(y.devices(), {device})
self.assertAllClose(np.astype(x.dtype), y)
# Test we can create multiple arrays
z = jax.dlpack.from_dlpack(x)
self.assertEqual(z.devices(), {device})
self.assertAllClose(np.astype(x.dtype), z)
@jtu.sample_product(shape=all_shapes, dtype=dlpack_dtypes)
@unittest.skipIf(not tf, "Test requires TensorFlow")
def testTensorFlowToJax(self, shape, dtype):
if (not config.enable_x64.value and
dtype in [jnp.int64, jnp.uint64, jnp.float64]):
raise self.skipTest("x64 types are disabled by jax_enable_x64")
if (jtu.test_device_matches(["gpu"]) and
not tf.config.list_physical_devices("GPU")):
raise self.skipTest("TensorFlow not configured with GPU support")
if jtu.test_device_matches(["gpu"]) and dtype == jnp.int32:
raise self.skipTest("TensorFlow does not place int32 tensors on GPU")
rng = jtu.rand_default(self.rng())
np = rng(shape, dtype)
with tf.device("/GPU:0" if jtu.test_device_matches(["gpu"]) else "/CPU:0"):
x = tf.identity(tf.constant(np))
y = jax.dlpack.from_dlpack(x)
self.assertAllClose(np, y)
@jtu.sample_product(
shape=all_shapes,
dtype=dlpack_dtypes,
)
@unittest.skipIf(not tf, "Test requires TensorFlow")
def testJaxToTensorFlow(self, shape, dtype):
if (not config.enable_x64.value and
dtype in [jnp.int64, jnp.uint64, jnp.float64]):
self.skipTest("x64 types are disabled by jax_enable_x64")
if (jtu.test_device_matches(["gpu"]) and
not tf.config.list_physical_devices("GPU")):
raise self.skipTest("TensorFlow not configured with GPU support")
rng = jtu.rand_default(self.rng())
np = rng(shape, dtype)
x = jnp.array(np)
# TODO(parkers): Remove after setting 'stream' properly.
jax.block_until_ready(x)
# TODO(b/171320191): this line works around a missing context initialization
# bug in TensorFlow.
_ = tf.add(1, 1)
y = tf.experimental.dlpack.from_dlpack(x.__dlpack__())
self.assertAllClose(np, y.numpy())
@unittest.skipIf(not tf, "Test requires TensorFlow")
def testTensorFlowToJaxInt64(self):
# See https://github.com/jax-ml/jax/issues/11895
x = jax.dlpack.from_dlpack(tf.ones((2, 3), tf.int64))
dtype_expected = jnp.int64 if config.enable_x64.value else jnp.int32
self.assertEqual(x.dtype, dtype_expected)
@unittest.skipIf(not tf, "Test requires TensorFlow")
def testTensorFlowToJaxNondefaultLayout(self):
x = tf.transpose(np.arange(4).reshape(2, 2))
self.assertAllClose(x.numpy(), jax.dlpack.from_dlpack(x))
@jtu.sample_product(
shape=all_shapes,
dtype=numpy_dtypes,
copy=[False, True, None],
aligned=[False, True],
)
def testNumpyToJax(self, shape, dtype, copy, aligned):
rng = jtu.rand_default(self.rng())
x_np = rng(shape, dtype)
device = jax.devices()[0]
alignment = _get_max_align_bits(dtype, device) if aligned else 2
x_np = _ensure_alignment(x_np, desired_alignment=alignment)
_from_dlpack = lambda: jnp.from_dlpack(x_np, device=device, copy=copy)
if copy is not None and not copy and (jax.default_backend() != "cpu"
or not aligned):
self.assertRaisesRegex(
ValueError, "Specified .* which requires a copy", _from_dlpack
)
else:
self.assertAllClose(x_np, _from_dlpack())
def testNumpyToJaxNondefaultLayout(self):
x = np.arange(4).reshape(2, 2).T
self.assertAllClose(x, jax.dlpack.from_dlpack(x))
@jtu.sample_product(shape=all_shapes, dtype=numpy_dtypes)
@jtu.run_on_devices("cpu") # NumPy only accepts cpu DLPacks
def testJaxToNumpy(self, shape, dtype):
rng = jtu.rand_default(self.rng())
x_jax = jnp.array(rng(shape, dtype))
x_np = np.from_dlpack(x_jax)
self.assertAllClose(x_np, x_jax)
| DLPackTest |
python | huggingface__transformers | src/transformers/models/bart/modeling_bart.py | {
"start": 56152,
"end": 63711
} | class ____(BartPreTrainedModel):
def __init__(self, config: BartConfig, **kwargs):
super().__init__(config, **kwargs)
self.model = BartModel(config)
self.classification_head = BartClassificationHead(
config.d_model,
config.d_model,
config.num_labels,
config.classifier_dropout,
)
# Initialize weights and apply final processing
self.post_init()
def tie_weights(self, missing_keys: Optional[set[str]] = None, recompute_mapping: bool = True):
"""We need to overload here to handle the wrong key saved in some main checkpoints."""
if self.config.tie_word_embeddings:
# Some model checkpoints like "facebook/bart-large-cnn"'s embedding weight is in decoder.embed_tokens,
# need check here, see issue #36247
if missing_keys is not None:
if "model.shared.weight" in missing_keys and "model.decoder.embed_tokens.weight" not in missing_keys:
self.model.encoder.embed_tokens.weight = self.model.decoder.embed_tokens.weight
self.model.shared.weight = self.model.decoder.embed_tokens.weight
missing_keys.discard("model.encoder.embed_token.weight")
missing_keys.discard("model.shared.weight")
# needs to be done after, otherwise it raises an Error because the correct weights are not present
super().tie_weights(missing_keys=missing_keys, recompute_mapping=recompute_mapping)
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[list[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[tuple, Seq2SeqSequenceClassifierOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Bart uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
For translation and summarization training, `decoder_input_ids` should be provided. If no
`decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
for denoising pre-training following the paper.
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read [`modeling_bart._prepare_decoder_attention_mask`]
and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more
information on the default strategy.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
if input_ids is None and inputs_embeds is not None:
raise NotImplementedError(
f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
hidden_states = outputs[0] # last hidden state
eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device)
if len(torch.unique_consecutive(eos_mask.sum(1))) > 1:
raise ValueError("All examples must have the same number of <eos> tokens.")
sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[
:, -1, :
]
logits = self.classification_head(sentence_representation)
loss = None
if labels is not None:
labels = labels.to(logits.device)
if self.config.problem_type is None:
if self.config.num_labels == 1:
self.config.problem_type = "regression"
elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.config.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return Seq2SeqSequenceClassifierOutput(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
@auto_docstring
| BartForSequenceClassification |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictClosed7.py | {
"start": 1247,
"end": 1502
} | class ____(TypedDict, closed=True):
a: NotRequired[ReadOnly[int]]
td5: TD5 = {"a": 1}
# This should generate an error because some elements are ReadOnly.
td5.clear()
# This should generate an error because some elements are ReadOnly.
td5.popitem()
| TD5 |
python | PrefectHQ__prefect | tests/utilities/test_callables.py | {
"start": 23177,
"end": 24071
} | class ____:
def test_no_error_if_no_variadic_parameter(self):
def foo(a, b):
pass
parameters = {"a": 1, "b": 2}
new_params = callables.explode_variadic_parameter(foo, parameters)
assert parameters == new_params
def test_no_error_if_variadic_parameter_and_kwargs_provided(self):
def foo(a, b, **kwargs):
pass
parameters = {"a": 1, "b": 2, "kwargs": {"c": 3, "d": 4}}
new_params = callables.explode_variadic_parameter(foo, parameters)
assert new_params == {"a": 1, "b": 2, "c": 3, "d": 4}
def test_no_error_if_variadic_parameter_and_no_kwargs_provided(self):
def foo(a, b, **kwargs):
pass
parameters = {"a": 1, "b": 2}
new_params = callables.explode_variadic_parameter(foo, parameters)
assert new_params == parameters
| TestExplodeVariadicParameter |
python | TheAlgorithms__Python | maths/prime_check.py | {
"start": 1353,
"end": 2261
} | class ____(unittest.TestCase):
def test_primes(self):
assert is_prime(2)
assert is_prime(3)
assert is_prime(5)
assert is_prime(7)
assert is_prime(11)
assert is_prime(13)
assert is_prime(17)
assert is_prime(19)
assert is_prime(23)
assert is_prime(29)
def test_not_primes(self):
with pytest.raises(ValueError):
is_prime(-19)
assert not is_prime(0), (
"Zero doesn't have any positive factors, primes must have exactly two."
)
assert not is_prime(1), (
"One only has 1 positive factor, primes must have exactly two."
)
assert not is_prime(2 * 2)
assert not is_prime(2 * 3)
assert not is_prime(3 * 3)
assert not is_prime(3 * 5)
assert not is_prime(3 * 5 * 7)
if __name__ == "__main__":
unittest.main()
| Test |
python | langchain-ai__langchain | libs/langchain/langchain_classic/evaluation/criteria/eval_chain.py | {
"start": 5737,
"end": 18478
} | class ____(StringEvaluator, LLMEvalChain, LLMChain):
r"""LLM Chain for evaluating runs against criteria.
Parameters
----------
llm : BaseLanguageModel
The language model to use for evaluation.
criteria : Union[Mapping[str, str]]
The criteria or rubric to evaluate the runs against. It can be a mapping of
criterion name to its description, or a single criterion name.
prompt : Optional[BasePromptTemplate], default=None
The prompt template to use for generating prompts. If not provided, a
default prompt template will be used based on the value of
`requires_reference`.
requires_reference : bool, default=False
Whether the evaluation requires a reference text. If `True`, the
`PROMPT_WITH_REFERENCES` template will be used, which includes the
reference labels in the prompt. Otherwise, the `PROMPT` template will be
used, which is a reference-free prompt.
**kwargs : Any
Additional keyword arguments to pass to the `LLMChain` constructor.
Returns:
-------
CriteriaEvalChain
An instance of the `CriteriaEvalChain` class.
Examples:
--------
>>> from langchain_anthropic import ChatAnthropic
>>> from langchain_classic.evaluation.criteria import CriteriaEvalChain
>>> model = ChatAnthropic(temperature=0)
>>> criteria = {"my-custom-criterion": "Is the submission the most amazing ever?"}
>>> evaluator = CriteriaEvalChain.from_llm(llm=model, criteria=criteria)
>>> evaluator.evaluate_strings(
... prediction="Imagine an ice cream flavor for the color aquamarine",
... input="Tell me an idea",
... )
{
'reasoning': 'Here is my step-by-step reasoning for the given criteria:\n\nThe criterion is: "Is the submission the most amazing ever?" This is a subjective criterion and open to interpretation. The submission suggests an aquamarine-colored ice cream flavor which is creative but may or may not be considered the most amazing idea ever conceived. There are many possible amazing ideas and this one ice cream flavor suggestion may or may not rise to that level for every person. \n\nN',
'value': 'N',
'score': 0,
}
>>> from langchain_openai import ChatOpenAI
>>> from langchain_classic.evaluation.criteria import LabeledCriteriaEvalChain
>>> model = ChatOpenAI(model="gpt-4", temperature=0)
>>> criteria = "correctness"
>>> evaluator = LabeledCriteriaEvalChain.from_llm(
... llm=model,
... criteria=criteria,
... )
>>> evaluator.evaluate_strings(
... prediction="The answer is 4",
... input="How many apples are there?",
... reference="There are 3 apples",
... )
{
'score': 0,
'reasoning': 'The criterion for this task is the correctness of the submission. The submission states that there are 4 apples, but the reference indicates that there are actually 3 apples. Therefore, the submission is not correct, accurate, or factual according to the given criterion.\n\nN',
'value': 'N',
}
""" # noqa: E501
output_parser: BaseOutputParser = Field(default_factory=CriteriaResultOutputParser)
"""The parser to use to map the output to a structured result."""
criterion_name: str
"""The name of the criterion being evaluated."""
output_key: str = "results"
@classmethod
@override
def is_lc_serializable(cls) -> bool:
return False
model_config = ConfigDict(
extra="ignore",
)
@property
def requires_reference(self) -> bool:
"""Whether the evaluation requires a reference text."""
return False
@property
@override
def requires_input(self) -> bool:
return True
@property
def evaluation_name(self) -> str:
"""Get the name of the evaluation.
Returns:
-------
str
The name of the evaluation.
"""
return self.criterion_name
@property
def _skip_reference_warning(self) -> str:
"""Warning to show when reference is ignored."""
return (
f"Ignoring reference in {self.__class__.__name__}, as it is not expected."
"\nTo use references, use the labeled_criteria instead."
)
@classmethod
def _resolve_prompt(
cls,
prompt: BasePromptTemplate | None = None,
) -> BasePromptTemplate:
expected_input_vars = {"input", "output", "criteria"}
prompt_ = prompt or PROMPT
if expected_input_vars != set(prompt_.input_variables):
msg = (
f"Input variables should be {expected_input_vars}, "
f"but got {prompt_.input_variables}"
)
raise ValueError(msg)
return prompt_
@classmethod
def resolve_criteria(
cls,
criteria: CRITERIA_TYPE | str | None,
) -> dict[str, str]:
"""Resolve the criteria to evaluate.
Parameters
----------
criteria : CRITERIA_TYPE
The criteria to evaluate the runs against. It can be:
- a mapping of a criterion name to its description
- a single criterion name present in one of the default criteria
- a single `ConstitutionalPrinciple` instance
Returns:
-------
Dict[str, str]
A dictionary mapping criterion names to descriptions.
Examples:
--------
>>> criterion = "relevance"
>>> CriteriaEvalChain.resolve_criteria(criteria)
{'relevance': 'Is the submission referring to a real quote from the text?'}
"""
return resolve_criteria(criteria)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
criteria: CRITERIA_TYPE | None = None,
*,
prompt: BasePromptTemplate | None = None,
**kwargs: Any,
) -> CriteriaEvalChain:
"""Create a `CriteriaEvalChain` instance from an llm and criteria.
Parameters
----------
llm : BaseLanguageModel
The language model to use for evaluation.
criteria : CRITERIA_TYPE - default=None for "helpfulness"
The criteria to evaluate the runs against. It can be:
- a mapping of a criterion name to its description
- a single criterion name present in one of the default criteria
- a single `ConstitutionalPrinciple` instance
prompt : Optional[BasePromptTemplate], default=None
The prompt template to use for generating prompts. If not provided,
a default prompt template will be used.
**kwargs : Any
Additional keyword arguments to pass to the `LLMChain`
constructor.
Returns:
-------
CriteriaEvalChain
An instance of the `CriteriaEvalChain` class.
Examples:
--------
>>> from langchain_openai import OpenAI
>>> from langchain_classic.evaluation.criteria import LabeledCriteriaEvalChain
>>> model = OpenAI()
>>> criteria = {
"hallucination": (
"Does this submission contain information"
" not present in the input or reference?"
),
}
>>> chain = LabeledCriteriaEvalChain.from_llm(
llm=model,
criteria=criteria,
)
"""
prompt_ = cls._resolve_prompt(prompt)
if criteria == Criteria.CORRECTNESS:
msg = (
"Correctness should not be used in the reference-free"
" 'criteria' evaluator (CriteriaEvalChain)."
" Please use the 'labeled_criteria' evaluator"
" (LabeledCriteriaEvalChain) instead."
)
raise ValueError(msg)
criteria_ = cls.resolve_criteria(criteria)
criteria_str = "\n".join(f"{k}: {v}" for k, v in criteria_.items())
prompt_ = prompt_.partial(criteria=criteria_str)
return cls(
llm=llm,
prompt=prompt_,
criterion_name="-".join(criteria_),
**kwargs,
)
def _get_eval_input(
self,
prediction: str,
reference: str | None,
input_: str | None,
) -> dict:
"""Get the evaluation input."""
input_dict = {
"input": input_,
"output": prediction,
}
if self.requires_reference:
input_dict["reference"] = reference
return input_dict
def _prepare_output(self, result: dict) -> dict:
"""Prepare the output."""
parsed = result[self.output_key]
if RUN_KEY in result:
parsed[RUN_KEY] = result[RUN_KEY]
return parsed
@override
def _evaluate_strings(
self,
*,
prediction: str,
reference: str | None = None,
input: str | None = None,
callbacks: Callbacks = None,
tags: list[str] | None = None,
metadata: dict[str, Any] | None = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
"""Evaluate a prediction against the criteria.
Args:
prediction: The predicted text to evaluate.
reference: The reference text to compare against. This is required if
`requires_reference` is `True`.
input: The input text used to generate the prediction.
callbacks: The callbacks to use.
tags: The tags to apply.
metadata: The metadata to use.
include_run_info: Whether to include run info in the output.
**kwargs: Additional keyword arguments to pass to the `LLMChain` `__call__`
method.
Returns:
The evaluation results.
Examples:
>>> from langchain_openai import OpenAI
>>> from langchain_classic.evaluation.criteria import CriteriaEvalChain
>>> model = OpenAI()
>>> criteria = "conciseness"
>>> chain = CriteriaEvalChain.from_llm(llm=model, criteria=criteria)
>>> chain.evaluate_strings(
prediction="The answer is 42.",
reference="42",
input="What is the answer to life, the universe, and everything?",
)
"""
input_ = self._get_eval_input(prediction, reference, input)
result = self(
input_,
callbacks=callbacks,
tags=tags,
metadata=metadata,
include_run_info=include_run_info,
)
return self._prepare_output(result)
@override
async def _aevaluate_strings(
self,
*,
prediction: str,
reference: str | None = None,
input: str | None = None,
callbacks: Callbacks = None,
tags: list[str] | None = None,
metadata: dict[str, Any] | None = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
"""Asynchronously evaluate a prediction against the criteria.
Args:
prediction: The predicted text to evaluate.
reference: The reference text to compare against. This is required if
`requires_reference` is `True`.
input: The input text used to generate the prediction.
callbacks: The callbacks to use.
tags: The tags to apply.
metadata: The metadata to use.
include_run_info: Whether to include run info in the output.
**kwargs: Additional keyword arguments to pass to the `LLMChain` `__call__`
method.
Returns:
The evaluation results.
Examples:
>>> from langchain_openai import OpenAI
>>> from langchain_classic.evaluation.criteria import CriteriaEvalChain
>>> model = OpenAI()
>>> criteria = "conciseness"
>>> chain = CriteriaEvalChain.from_llm(llm=model, criteria=criteria)
>>> await chain.aevaluate_strings(
prediction="The answer is 42.",
reference="42",
input="What is the answer to life, the universe, and everything?",
)
"""
input_ = self._get_eval_input(prediction, reference, input)
result = await self.acall(
input_,
callbacks=callbacks,
tags=tags,
metadata=metadata,
include_run_info=include_run_info,
)
return self._prepare_output(result)
| CriteriaEvalChain |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_C.py | {
"start": 7477,
"end": 8958
} | class ____(Benchmark):
r"""
Colville objective function.
This class defines the Colville global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Colville}}(x) = \left(x_{1} -1\right)^{2}
+ 100 \left(x_{1}^{2} - x_{2}\right)^{2}
+ 10.1 \left(x_{2} -1\right)^{2} + \left(x_{3} -1\right)^{2}
+ 90 \left(x_{3}^{2} - x_{4}\right)^{2}
+ 10.1 \left(x_{4} -1\right)^{2} + 19.8 \frac{x_{4} -1}{x_{2}}
with :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., 4`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 1` for
:math:`i = 1, ..., 4`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO docstring equation is wrong use Jamil#36
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[1 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return (100 * (x[0] - x[1] ** 2) ** 2
+ (1 - x[0]) ** 2 + (1 - x[2]) ** 2
+ 90 * (x[3] - x[2] ** 2) ** 2
+ 10.1 * ((x[1] - 1) ** 2 + (x[3] - 1) ** 2)
+ 19.8 * (x[1] - 1) * (x[3] - 1))
| Colville |
python | apache__airflow | shared/secrets_masker/tests/secrets_masker/test_secrets_masker.py | {
"start": 21883,
"end": 23589
} | class ____:
def test_structured_sensitive_fields_always_masked(self):
secrets_masker = SecretsMasker()
configure_secrets_masker_for_test(secrets_masker, min_length=5)
short_password = "pwd"
short_token = "tk"
short_api_key = "key"
test_data = {
"password": short_password,
"api_key": short_token,
"connection": {"secret": short_api_key},
}
with patch(
"airflow_shared.secrets_masker.secrets_masker._secrets_masker", return_value=secrets_masker
):
redacted_data = redact(test_data)
assert redacted_data["password"] == "***"
assert redacted_data["api_key"] == "***"
assert redacted_data["connection"]["secret"] == "***"
def test_unstructured_text_min_length_enforced(self):
secrets_masker = SecretsMasker()
min_length = 5
configure_secrets_masker_for_test(secrets_masker, min_length=min_length)
short_secret = "abc"
long_secret = "abcdef"
with patch(
"airflow_shared.secrets_masker.secrets_masker._secrets_masker", return_value=secrets_masker
):
secrets_masker.add_mask(short_secret)
secrets_masker.add_mask(long_secret)
assert short_secret not in secrets_masker.patterns
assert long_secret in secrets_masker.patterns
test_data = f"Containing {short_secret} and {long_secret}"
redacted = secrets_masker.redact(test_data)
assert short_secret in redacted
assert long_secret not in redacted
assert "***" in redacted
| TestStructuredVsUnstructuredMasking |
python | prabhupant__python-ds | data_structures/binary_trees/height_of_tree.py | {
"start": 94,
"end": 614
} | class ____:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def height(root):
if not root:
return 0
else:
lheight = height(root.left)
rheight = height(root.right)
return 1 + max(lheight, rheight)
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(7)
root.left.right = Node(6)
root.right.left = Node(5)
root.right.right = Node(4)
root.right.right.right = Node(40)
print(height(root)) | Node |
python | pypa__setuptools | setuptools/errors.py | {
"start": 1988,
"end": 3024
} | class ____(BaseError, RuntimeError): # type: ignore[valid-type, misc] # distutils imports are `Any` on python 3.12+
"""Impossible to perform automatic discovery of packages and/or modules.
The current project layout or given discovery options can lead to problems when
scanning the project directory.
Setuptools might also refuse to complete auto-discovery if an error prone condition
is detected (e.g. when a project is organised as a flat-layout but contains
multiple directories that can be taken as top-level packages inside a single
distribution [*]_). In these situations the users are encouraged to be explicit
about which packages to include or to make the discovery parameters more specific.
.. [*] Since multi-package distributions are uncommon it is very likely that the
developers did not intend for all the directories to be packaged, and are just
leaving auxiliary code in the repository top-level, such as maintenance-related
scripts.
"""
| PackageDiscoveryError |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/map_test.py | {
"start": 4842,
"end": 4967
} | class ____:
"""Dummy class used for invalid return value tests."""
def __init__(self):
pass
@dataclasses.dataclass
| Foo |
python | tensorflow__tensorflow | tensorflow/python/debug/cli/profile_analyzer_cli.py | {
"start": 7557,
"end": 29557
} | class ____(object):
"""Analyzer for profiling data."""
def __init__(self, graph, run_metadata):
"""ProfileAnalyzer constructor.
Args:
graph: (tf.Graph) Python graph object.
run_metadata: A `RunMetadata` protobuf object.
Raises:
ValueError: If run_metadata is None.
"""
self._graph = graph
if not run_metadata:
raise ValueError("No RunMetadata passed for profile analysis.")
self._run_metadata = run_metadata
self._arg_parsers = {}
ap = argparse.ArgumentParser(
description="List nodes profile information.",
usage=argparse.SUPPRESS)
ap.add_argument(
"-d",
"--%s" % _DEVICE_NAME_FILTER_FLAG,
dest=_DEVICE_NAME_FILTER_FLAG,
type=str,
default="",
help="filter device name by regex.")
ap.add_argument(
"-n",
"--%s" % _NODE_NAME_FILTER_FLAG,
dest=_NODE_NAME_FILTER_FLAG,
type=str,
default="",
help="filter node name by regex.")
ap.add_argument(
"-t",
"--%s" % _OP_TYPE_FILTER_FLAG,
dest=_OP_TYPE_FILTER_FLAG,
type=str,
default="",
help="filter op type by regex.")
# TODO(annarev): allow file filtering at non-stack top position.
ap.add_argument(
"-f",
"--file_path_filter",
dest="file_path_filter",
type=str,
default="",
help="filter by file name at the top position of node's creation "
"stack that does not belong to TensorFlow library.")
ap.add_argument(
"--min_lineno",
dest="min_lineno",
type=int,
default=-1,
help="(Inclusive) lower bound for 1-based line number in source file. "
"If <= 0, has no effect.")
ap.add_argument(
"--max_lineno",
dest="max_lineno",
type=int,
default=-1,
help="(Exclusive) upper bound for 1-based line number in source file. "
"If <= 0, has no effect.")
ap.add_argument(
"-e",
"--execution_time",
dest="execution_time",
type=str,
default="",
help="Filter by execution time interval "
"(includes compute plus pre- and post -processing time). "
"Supported units are s, ms and us (default). "
"E.g. -e >100s, -e <100, -e [100us,1000ms]")
ap.add_argument(
"-o",
"--op_time",
dest="op_time",
type=str,
default="",
help="Filter by op time interval (only includes compute time). "
"Supported units are s, ms and us (default). "
"E.g. -e >100s, -e <100, -e [100us,1000ms]")
ap.add_argument(
"-s",
"--sort_by",
dest="sort_by",
type=str,
default=SORT_OPS_BY_START_TIME,
help=("the field to sort the data by: (%s)" %
" | ".join([SORT_OPS_BY_OP_NAME, SORT_OPS_BY_OP_TYPE,
SORT_OPS_BY_START_TIME, SORT_OPS_BY_OP_TIME,
SORT_OPS_BY_EXEC_TIME, SORT_OPS_BY_LINE])))
ap.add_argument(
"-r",
"--reverse",
dest="reverse",
action="store_true",
help="sort the data in reverse (descending) order")
ap.add_argument(
"--time_unit",
dest="time_unit",
type=str,
default=cli_shared.TIME_UNIT_US,
help="Time unit (" + " | ".join(cli_shared.TIME_UNITS) + ")")
self._arg_parsers["list_profile"] = ap
ap = argparse.ArgumentParser(
description="Print a Python source file with line-level profile "
"information",
usage=argparse.SUPPRESS)
ap.add_argument(
"source_file_path",
type=str,
help="Path to the source_file_path")
ap.add_argument(
"--cost_type",
type=str,
choices=["exec_time", "op_time"],
default="exec_time",
help="Type of cost to display")
ap.add_argument(
"--time_unit",
dest="time_unit",
type=str,
default=cli_shared.TIME_UNIT_US,
help="Time unit (" + " | ".join(cli_shared.TIME_UNITS) + ")")
ap.add_argument(
"-d",
"--%s" % _DEVICE_NAME_FILTER_FLAG,
dest=_DEVICE_NAME_FILTER_FLAG,
type=str,
default="",
help="Filter device name by regex.")
ap.add_argument(
"-n",
"--%s" % _NODE_NAME_FILTER_FLAG,
dest=_NODE_NAME_FILTER_FLAG,
type=str,
default="",
help="Filter node name by regex.")
ap.add_argument(
"-t",
"--%s" % _OP_TYPE_FILTER_FLAG,
dest=_OP_TYPE_FILTER_FLAG,
type=str,
default="",
help="Filter op type by regex.")
ap.add_argument(
"--init_line",
dest="init_line",
type=int,
default=0,
help="The 1-based line number to scroll to initially.")
self._arg_parsers["print_source"] = ap
def list_profile(self, args, screen_info=None):
"""Command handler for list_profile.
List per-operation profile information.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
"""
screen_cols = 80
if screen_info and "cols" in screen_info:
screen_cols = screen_info["cols"]
parsed = self._arg_parsers["list_profile"].parse_args(args)
op_time_interval = (command_parser.parse_time_interval(parsed.op_time)
if parsed.op_time else None)
exec_time_interval = (
command_parser.parse_time_interval(parsed.execution_time)
if parsed.execution_time else None)
node_name_regex = (re.compile(parsed.node_name_filter)
if parsed.node_name_filter else None)
file_path_regex = (re.compile(parsed.file_path_filter)
if parsed.file_path_filter else None)
op_type_regex = (re.compile(parsed.op_type_filter)
if parsed.op_type_filter else None)
output = debugger_cli_common.RichTextLines([""])
device_name_regex = (re.compile(parsed.device_name_filter)
if parsed.device_name_filter else None)
data_generator = self._get_profile_data_generator()
device_count = len(self._run_metadata.step_stats.dev_stats)
for index in range(device_count):
device_stats = self._run_metadata.step_stats.dev_stats[index]
if not device_name_regex or device_name_regex.match(device_stats.device):
profile_data = [
datum for datum in data_generator(device_stats)
if _list_profile_filter(
datum, node_name_regex, file_path_regex, op_type_regex,
op_time_interval, exec_time_interval,
min_lineno=parsed.min_lineno, max_lineno=parsed.max_lineno)]
profile_data = sorted(
profile_data,
key=lambda datum: _list_profile_sort_key(datum, parsed.sort_by),
reverse=parsed.reverse)
output.extend(
self._get_list_profile_lines(
device_stats.device, index, device_count,
profile_data, parsed.sort_by, parsed.reverse, parsed.time_unit,
device_name_filter=parsed.device_name_filter,
node_name_filter=parsed.node_name_filter,
op_type_filter=parsed.op_type_filter,
screen_cols=screen_cols))
return output
def _get_profile_data_generator(self):
"""Get function that generates `ProfileDatum` objects.
Returns:
A function that generates `ProfileDatum` objects.
"""
node_to_file_path = {}
node_to_line_number = {}
node_to_func_name = {}
node_to_op_type = {}
for op in self._graph.get_operations():
for trace_entry in reversed(op.traceback):
file_path = trace_entry[0]
line_num = trace_entry[1]
func_name = trace_entry[2]
if not source_utils.guess_is_tensorflow_py_library(file_path):
break
node_to_file_path[op.name] = file_path
node_to_line_number[op.name] = line_num
node_to_func_name[op.name] = func_name
node_to_op_type[op.name] = op.type
def profile_data_generator(device_step_stats):
for node_stats in device_step_stats.node_stats:
if node_stats.node_name == "_SOURCE" or node_stats.node_name == "_SINK":
continue
yield profiling.ProfileDatum(
device_step_stats.device,
node_stats,
node_to_file_path.get(node_stats.node_name, ""),
node_to_line_number.get(node_stats.node_name, 0),
node_to_func_name.get(node_stats.node_name, ""),
node_to_op_type.get(node_stats.node_name, ""))
return profile_data_generator
def _get_list_profile_lines(
self, device_name, device_index, device_count,
profile_datum_list, sort_by, sort_reverse, time_unit,
device_name_filter=None, node_name_filter=None, op_type_filter=None,
screen_cols=80):
"""Get `RichTextLines` object for list_profile command for a given device.
Args:
device_name: (string) Device name.
device_index: (int) Device index.
device_count: (int) Number of devices.
profile_datum_list: List of `ProfileDatum` objects.
sort_by: (string) Identifier of column to sort. Sort identifier
must match value of SORT_OPS_BY_OP_NAME, SORT_OPS_BY_OP_TYPE,
SORT_OPS_BY_EXEC_TIME, SORT_OPS_BY_MEMORY or SORT_OPS_BY_LINE.
sort_reverse: (bool) Whether to sort in descending instead of default
(ascending) order.
time_unit: time unit, must be in cli_shared.TIME_UNITS.
device_name_filter: Regular expression to filter by device name.
node_name_filter: Regular expression to filter by node name.
op_type_filter: Regular expression to filter by op type.
screen_cols: (int) Number of columns available on the screen (i.e.,
available screen width).
Returns:
`RichTextLines` object containing a table that displays profiling
information for each op.
"""
profile_data = ProfileDataTableView(profile_datum_list, time_unit=time_unit)
# Calculate total time early to calculate column widths.
total_op_time = sum(datum.op_time for datum in profile_datum_list)
total_exec_time = sum(datum.node_exec_stats.all_end_rel_micros
for datum in profile_datum_list)
device_total_row = [
"Device Total", "",
cli_shared.time_to_readable_str(total_op_time,
force_time_unit=time_unit),
cli_shared.time_to_readable_str(total_exec_time,
force_time_unit=time_unit)]
# Calculate column widths.
column_widths = [
len(column_name) for column_name in profile_data.column_names()]
for col in range(len(device_total_row)):
column_widths[col] = max(column_widths[col], len(device_total_row[col]))
for col in range(len(column_widths)):
for row in range(profile_data.row_count()):
column_widths[col] = max(
column_widths[col], len(profile_data.value(
row,
col,
device_name_filter=device_name_filter,
node_name_filter=node_name_filter,
op_type_filter=op_type_filter)))
column_widths[col] += 2 # add margin between columns
# Add device name.
output = [RL("-" * screen_cols)]
device_row = "Device %d of %d: %s" % (
device_index + 1, device_count, device_name)
output.append(RL(device_row))
output.append(RL())
# Add headers.
base_command = "list_profile"
row = RL()
for col in range(profile_data.column_count()):
column_name = profile_data.column_names()[col]
sort_id = profile_data.column_sort_id(col)
command = "%s -s %s" % (base_command, sort_id)
if sort_by == sort_id and not sort_reverse:
command += " -r"
head_menu_item = debugger_cli_common.MenuItem(None, command)
row += RL(column_name, font_attr=[head_menu_item, "bold"])
row += RL(" " * (column_widths[col] - len(column_name)))
output.append(row)
# Add data rows.
for row in range(profile_data.row_count()):
new_row = RL()
for col in range(profile_data.column_count()):
new_cell = profile_data.value(
row,
col,
device_name_filter=device_name_filter,
node_name_filter=node_name_filter,
op_type_filter=op_type_filter)
new_row += new_cell
new_row += RL(" " * (column_widths[col] - len(new_cell)))
output.append(new_row)
# Add stat totals.
row_str = ""
for width, row in zip(column_widths, device_total_row):
row_str += ("{:<%d}" % width).format(row)
output.append(RL())
output.append(RL(row_str))
return debugger_cli_common.rich_text_lines_from_rich_line_list(output)
def _measure_list_profile_column_widths(self, profile_data):
"""Determine the maximum column widths for each data list.
Args:
profile_data: list of ProfileDatum objects.
Returns:
List of column widths in the same order as columns in data.
"""
num_columns = len(profile_data.column_names())
widths = [len(column_name) for column_name in profile_data.column_names()]
for row in range(profile_data.row_count()):
for col in range(num_columns):
widths[col] = max(
widths[col], len(str(profile_data.row_values(row)[col])) + 2)
return widths
_LINE_COST_ATTR = cli_shared.COLOR_CYAN
_LINE_NUM_ATTR = cli_shared.COLOR_YELLOW
_NUM_NODES_HEAD = "#nodes"
_NUM_EXECS_SUB_HEAD = "(#execs)"
_LINENO_HEAD = "lineno"
_SOURCE_HEAD = "source"
def print_source(self, args, screen_info=None):
"""Print a Python source file with line-level profile information.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
"""
del screen_info
parsed = self._arg_parsers["print_source"].parse_args(args)
device_name_regex = (re.compile(parsed.device_name_filter)
if parsed.device_name_filter else None)
profile_data = []
data_generator = self._get_profile_data_generator()
device_count = len(self._run_metadata.step_stats.dev_stats)
for index in range(device_count):
device_stats = self._run_metadata.step_stats.dev_stats[index]
if device_name_regex and not device_name_regex.match(device_stats.device):
continue
profile_data.extend(data_generator(device_stats))
source_annotation = source_utils.annotate_source_against_profile(
profile_data,
os.path.expanduser(parsed.source_file_path),
node_name_filter=parsed.node_name_filter,
op_type_filter=parsed.op_type_filter)
if not source_annotation:
return debugger_cli_common.RichTextLines(
["The source file %s does not contain any profile information for "
"the previous Session run under the following "
"filters:" % parsed.source_file_path,
" --%s: %s" % (_DEVICE_NAME_FILTER_FLAG, parsed.device_name_filter),
" --%s: %s" % (_NODE_NAME_FILTER_FLAG, parsed.node_name_filter),
" --%s: %s" % (_OP_TYPE_FILTER_FLAG, parsed.op_type_filter)])
max_total_cost = 0
for line_index in source_annotation:
total_cost = self._get_total_cost(source_annotation[line_index],
parsed.cost_type)
max_total_cost = max(max_total_cost, total_cost)
source_lines, line_num_width = source_utils.load_source(
parsed.source_file_path)
cost_bar_max_length = 10
total_cost_head = parsed.cost_type
column_widths = {
"cost_bar": cost_bar_max_length + 3,
"total_cost": len(total_cost_head) + 3,
"num_nodes_execs": len(self._NUM_EXECS_SUB_HEAD) + 1,
"line_number": line_num_width,
}
head = RL(
" " * column_widths["cost_bar"] +
total_cost_head +
" " * (column_widths["total_cost"] - len(total_cost_head)) +
self._NUM_NODES_HEAD +
" " * (column_widths["num_nodes_execs"] - len(self._NUM_NODES_HEAD)),
font_attr=self._LINE_COST_ATTR)
head += RL(self._LINENO_HEAD, font_attr=self._LINE_NUM_ATTR)
sub_head = RL(
" " * (column_widths["cost_bar"] +
column_widths["total_cost"]) +
self._NUM_EXECS_SUB_HEAD +
" " * (column_widths["num_nodes_execs"] -
len(self._NUM_EXECS_SUB_HEAD)) +
" " * column_widths["line_number"],
font_attr=self._LINE_COST_ATTR)
sub_head += RL(self._SOURCE_HEAD, font_attr="bold")
lines = [head, sub_head]
output_annotations = {}
for i, line in enumerate(source_lines):
lineno = i + 1
if lineno in source_annotation:
annotation = source_annotation[lineno]
cost_bar = self._render_normalized_cost_bar(
self._get_total_cost(annotation, parsed.cost_type), max_total_cost,
cost_bar_max_length)
annotated_line = cost_bar
annotated_line += " " * (column_widths["cost_bar"] - len(cost_bar))
total_cost = RL(cli_shared.time_to_readable_str(
self._get_total_cost(annotation, parsed.cost_type),
force_time_unit=parsed.time_unit),
font_attr=self._LINE_COST_ATTR)
total_cost += " " * (column_widths["total_cost"] - len(total_cost))
annotated_line += total_cost
file_path_filter = re.escape(parsed.source_file_path) + "$"
command = "lp --file_path_filter %s --min_lineno %d --max_lineno %d" % (
file_path_filter, lineno, lineno + 1)
if parsed.device_name_filter:
command += " --%s %s" % (_DEVICE_NAME_FILTER_FLAG,
parsed.device_name_filter)
if parsed.node_name_filter:
command += " --%s %s" % (_NODE_NAME_FILTER_FLAG,
parsed.node_name_filter)
if parsed.op_type_filter:
command += " --%s %s" % (_OP_TYPE_FILTER_FLAG,
parsed.op_type_filter)
menu_item = debugger_cli_common.MenuItem(None, command)
num_nodes_execs = RL("%d(%d)" % (annotation.node_count,
annotation.node_exec_count),
font_attr=[self._LINE_COST_ATTR, menu_item])
num_nodes_execs += " " * (
column_widths["num_nodes_execs"] - len(num_nodes_execs))
annotated_line += num_nodes_execs
else:
annotated_line = RL(
" " * sum(column_widths[col_name] for col_name in column_widths
if col_name != "line_number"))
line_num_column = RL(" L%d" % (lineno), self._LINE_NUM_ATTR)
line_num_column += " " * (
column_widths["line_number"] - len(line_num_column))
annotated_line += line_num_column
annotated_line += line
lines.append(annotated_line)
if parsed.init_line == lineno:
output_annotations[
debugger_cli_common.INIT_SCROLL_POS_KEY] = len(lines) - 1
return debugger_cli_common.rich_text_lines_from_rich_line_list(
lines, annotations=output_annotations)
def _get_total_cost(self, aggregated_profile, cost_type):
if cost_type == "exec_time":
return aggregated_profile.total_exec_time
elif cost_type == "op_time":
return aggregated_profile.total_op_time
else:
raise ValueError("Unsupported cost type: %s" % cost_type)
def _render_normalized_cost_bar(self, cost, max_cost, length):
"""Render a text bar representing a normalized cost.
Args:
cost: the absolute value of the cost.
max_cost: the maximum cost value to normalize the absolute cost with.
length: (int) length of the cost bar, in number of characters, excluding
the brackets on the two ends.
Returns:
An instance of debugger_cli_common.RichTextLine.
"""
num_ticks = int(np.ceil(float(cost) / max_cost * length))
num_ticks = num_ticks or 1 # Minimum is 1 tick.
output = RL("[", font_attr=self._LINE_COST_ATTR)
output += RL("|" * num_ticks + " " * (length - num_ticks),
font_attr=["bold", self._LINE_COST_ATTR])
output += RL("]", font_attr=self._LINE_COST_ATTR)
return output
def get_help(self, handler_name):
return self._arg_parsers[handler_name].format_help()
def create_profiler_ui(graph,
                       run_metadata,
                       ui_type="readline",
                       on_ui_exit=None,
                       config=None):
  """Create a profiler UI based on a `tf.Graph` and `RunMetadata`.

  Args:
    graph: Python `Graph` object.
    run_metadata: A `RunMetadata` protobuf object.
    ui_type: (str) requested UI type, e.g., "readline".
    on_ui_exit: (`Callable`) the callback to be called when the UI exits.
    config: An instance of `cli_config.CLIConfig`.

  Returns:
    (base_ui.BaseUI) A BaseUI subtype object with a set of standard analyzer
      commands and tab-completions registered.
  """
  del config  # Currently unused.

  analyzer = ProfileAnalyzer(graph, run_metadata)
  cli = ui_factory.get_ui(ui_type, on_ui_exit=on_ui_exit)

  # Register each analyzer command together with its help text and alias.
  for command, handler, alias in (
      ("list_profile", analyzer.list_profile, "lp"),
      ("print_source", analyzer.print_source, "ps")):
    cli.register_command_handler(
        command,
        handler,
        analyzer.get_help(command),
        prefix_aliases=[alias])

  return cli
| ProfileAnalyzer |
python | ansible__ansible | lib/ansible/modules/user.py | {
"start": 20052,
"end": 20870
} | class ____(ctypes.Structure):
_fields_ = [
('sp_namp', ctypes.c_char_p),
('sp_pwdp', ctypes.c_char_p),
('sp_lstchg', ctypes.c_long),
('sp_min', ctypes.c_long),
('sp_max', ctypes.c_long),
('sp_warn', ctypes.c_long),
('sp_inact', ctypes.c_long),
('sp_expire', ctypes.c_long),
('sp_flag', ctypes.c_ulong),
]
try:
_LIBC = ctypes.cdll.LoadLibrary(
t.cast(
str,
ctypes.util.find_library('c')
)
)
_LIBC.getspnam.argtypes = (ctypes.c_char_p,)
_LIBC.getspnam.restype = ctypes.POINTER(StructSpwdType)
HAVE_SPWD = True
except AttributeError:
HAVE_SPWD = False
_HASH_RE = re.compile(r'[^a-zA-Z0-9./=]')
def getspnam(b_name):
return _LIBC.getspnam(b_name).contents
| StructSpwdType |
python | huggingface__transformers | src/transformers/models/deberta_v2/modeling_deberta_v2.py | {
"start": 17785,
"end": 18870
} | class ____(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.attention = DebertaV2Attention(config)
self.intermediate = DebertaV2Intermediate(config)
self.output = DebertaV2Output(config)
def forward(
self,
hidden_states,
attention_mask,
query_states=None,
relative_pos=None,
rel_embeddings=None,
output_attentions: bool = False,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
attention_output, att_matrix = self.attention(
hidden_states,
attention_mask,
output_attentions=output_attentions,
query_states=query_states,
relative_pos=relative_pos,
rel_embeddings=rel_embeddings,
)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
if output_attentions:
return (layer_output, att_matrix)
else:
return (layer_output, None)
| DebertaV2Layer |
python | tensorflow__tensorflow | tensorflow/python/eager/wrap_function_device_test.py | {
"start": 1600,
"end": 2753
} | class ____(test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
('cpu_reduce', 'CPU', _dataset_reduce_sum),
('gpu_reduce', 'GPU', _dataset_reduce_sum),
('cpu_loop', 'CPU', _loop_dataset_sum),
('gpu_loop', 'GPU', _loop_dataset_sum),
('cpu_iter', 'CPU', _iter_dataset_sum),
('gpu_iter', 'GPU', _iter_dataset_sum),
)
def testWrapFuncDatasetDevice(self, device_type, dataset_reduce_fn):
devices = config.list_logical_devices(device_type=device_type)
if not devices:
self.skipTest('Skip when {} is not detected by TF'.format(device_type))
@def_function.function
def comp():
return dataset_reduce_fn(dataset_ops.Dataset.range(10))
graph = comp.get_concrete_function().graph
def function_to_wrap():
with ops.device(devices[0].name):
return graph_def_importer.import_graph_def(graph.as_graph_def())
with ops.device(devices[0].name):
wrapped_noarg_fn = wrap_function.wrap_function(
function_to_wrap, signature=[])
wrapped_noarg_fn()
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| WrappedGraphTest |
python | MongoEngine__mongoengine | tests/queryset/test_modify.py | {
"start": 120,
"end": 202
} | class ____(Document):
id = IntField(primary_key=True)
value = IntField()
| Doc |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_map_metrics/column_values_of_type.py | {
"start": 398,
"end": 1540
} | class ____(ColumnMapMetricProvider):
condition_metric_name = "column_values.of_type"
condition_value_keys = ("type_",)
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, type_, **kwargs):
comp_types = []
try:
comp_types.append(np.dtype(type_).type)
except TypeError:
try:
pd_type = getattr(pd, type_)
if isinstance(pd_type, type):
comp_types.append(pd_type)
except AttributeError:
pass
try:
pd_type = getattr(pd.core.dtypes.dtypes, type_)
if isinstance(pd_type, type):
comp_types.append(pd_type)
except AttributeError:
pass
native_type = _native_type_type_map(type_)
if native_type is not None:
comp_types.extend(native_type)
if len(comp_types) < 1:
raise ValueError(f"Unrecognized numpy/python type: {type_}") # noqa: TRY003 # FIXME CoP
return column.map(lambda x: isinstance(x, tuple(comp_types)))
| ColumnValuesOfType |
python | h5py__h5py | h5py/tests/test_dataset.py | {
"start": 67526,
"end": 68316
} | class ____(BaseDataset):
""".astype() wrapper & context manager
"""
def test_astype_wrapper(self):
dset = self.f.create_dataset(make_name(), (100,), dtype='i2')
dset[...] = np.arange(100)
arr = dset.astype('f4')[:]
self.assertArrayEqual(arr, np.arange(100, dtype='f4'))
def test_astype_wrapper_len(self):
dset = self.f.create_dataset(make_name(), (100,), dtype='i2')
dset[...] = np.arange(100)
self.assertEqual(100, len(dset.astype('f4')))
def test_astype_wrapper_asarray(self):
dset = self.f.create_dataset(make_name(), (100,), dtype='i2')
dset[...] = np.arange(100)
arr = np.asarray(dset.astype('f4'), dtype='i2')
self.assertArrayEqual(arr, np.arange(100, dtype='i2'))
| TestAstype |
python | apache__airflow | providers/segment/src/airflow/providers/segment/hooks/segment.py | {
"start": 1175,
"end": 3466
} | class ____(BaseHook):
"""
Create new connection to Segment and allows you to pull data out of Segment or write to it.
You can then use that file with other
Airflow operators to move the data around or interact with segment.
:param segment_conn_id: the name of the connection that has the parameters
we need to connect to Segment. The connection should be type `json` and include a
write_key security token in the `Extras` field.
:param segment_debug_mode: Determines whether Segment should run in debug mode.
Defaults to False
.. note::
You must include a JSON structure in the `Extras` field.
We need a user's security token to connect to Segment.
So we define it in the `Extras` field as:
`{"write_key":"YOUR_SECURITY_TOKEN"}`
"""
conn_name_attr = "segment_conn_id"
default_conn_name = "segment_default"
conn_type = "segment"
hook_name = "Segment"
def __init__(
self, segment_conn_id: str = "segment_default", segment_debug_mode: bool = False, *args, **kwargs
) -> None:
super().__init__()
self.segment_conn_id = segment_conn_id
self.segment_debug_mode = segment_debug_mode
self._args = args
self._kwargs = kwargs
# get the connection parameters
self.connection = self.get_connection(self.segment_conn_id)
self.extras = self.connection.extra_dejson
self.write_key = self.extras.get("write_key")
if self.write_key is None:
raise AirflowException("No Segment write key provided")
def get_conn(self) -> analytics:
self.log.info("Setting write key for Segment analytics connection")
analytics.debug = self.segment_debug_mode
if self.segment_debug_mode:
self.log.info("Setting Segment analytics connection to debug mode")
analytics.on_error = self.on_error
analytics.write_key = self.write_key
return analytics
def on_error(self, error: str, items: str) -> None:
"""Handle error callbacks when using Segment with segment_debug_mode set to True."""
self.log.error("Encountered Segment error: %s with items: %s", error, items)
raise AirflowException(f"Segment error: {error}")
| SegmentHook |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 24543,
"end": 25033
} | class ____(RequestHandler):
@gen.coroutine
def get(self):
# Ensure that the flush callback is run whether or not there
# was any output. The gen.Task and direct yield forms are
# equivalent.
yield self.flush() # "empty" flush, but writes headers
yield self.flush() # empty flush
self.write("o")
yield self.flush() # flushes the "o"
yield self.flush() # empty flush
self.finish("k")
| EmptyFlushCallbackHandler |
python | getsentry__sentry | src/sentry/api/serializers/models/groupsearchview.py | {
"start": 527,
"end": 893
} | class ____(TypedDict):
id: str
createdBy: UserSerializerResponse
name: str
query: str
querySort: SORT_LITERALS
projects: list[int]
environments: list[str]
timeFilters: dict
lastVisited: str | None
dateCreated: str
dateUpdated: str
starred: bool
stars: int
@register(GroupSearchView)
| GroupSearchViewSerializerResponse |
python | pikepdf__pikepdf | src/pikepdf/form.py | {
"start": 25171,
"end": 25864
} | class ____(ABC):
"""Appearance stream generators are used to render forms.
They are used by the `pikepdf.form.Form` class to optionally generate appearance
streams as forms are filled.
"""
pdf: Pdf
form: AcroForm
def __init__(self, pdf: Pdf, form: AcroForm):
"""Initialize the appearance stream generator."""
self.pdf = pdf
self.form = form
@abstractmethod
def generate_text(self, field: AcroFormField):
"""Generate the appearance stream for a text field."""
@abstractmethod
def generate_choice(self, field: AcroFormField):
"""Generate the appearance stream for a choice field."""
| AppearanceStreamGenerator |
python | pytorch__pytorch | torch/testing/_internal/autograd_function_db.py | {
"start": 4850,
"end": 5584
} | class ____(torch.autograd.Function):
generate_vmap_rule = True
@staticmethod
def forward(x, y):
return x * y
@staticmethod
def setup_context(ctx, inputs, outputs):
ctx.save_for_backward(*inputs)
ctx.save_for_forward(*inputs)
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
gx = None
if ctx.needs_input_grad[0]:
gx = MulGenVmap.apply(grad_output, y)
gy = None
if ctx.needs_input_grad[1]:
gy = MulGenVmap.apply(grad_output, x)
return gx, gy
@staticmethod
def jvp(ctx, x_tangent, y_tangent):
x, y = ctx.saved_tensors
return x_tangent * y + y_tangent * x
| MulGenVmap |
python | mlflow__mlflow | tests/sagemaker/mock/__init__.py | {
"start": 336,
"end": 10782
} | class ____(BaseResponse):
"""
A collection of handlers for SageMaker API calls that produce API-conforming
JSON responses.
"""
@property
def sagemaker_backend(self):
return sagemaker_backends[DEFAULT_ACCOUNT_ID][self.region]
@property
def request_params(self):
return json.loads(self.body)
def create_endpoint_config(self):
"""
Handler for the SageMaker "CreateEndpointConfig" API call documented here:
https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpointConfig.html.
"""
config_name = self.request_params["EndpointConfigName"]
production_variants = self.request_params.get("ProductionVariants")
tags = self.request_params.get("Tags", [])
async_inference_config = self.request_params.get("AsyncInferenceConfig")
new_config = self.sagemaker_backend.create_endpoint_config(
config_name=config_name,
production_variants=production_variants,
tags=tags,
region_name=self.region,
async_inference_config=async_inference_config,
)
return json.dumps({"EndpointConfigArn": new_config.arn})
def describe_endpoint_config(self):
"""
Handler for the SageMaker "DescribeEndpoint" API call documented here:
https://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpoint.html.
"""
config_name = self.request_params["EndpointConfigName"]
config_description = self.sagemaker_backend.describe_endpoint_config(config_name)
return json.dumps(config_description.response_object)
def delete_endpoint_config(self):
"""
Handler for the SageMaker "DeleteEndpointConfig" API call documented here:
https://docs.aws.amazon.com/sagemaker/latest/dg/API_DeleteEndpointConfig.html.
"""
config_name = self.request_params["EndpointConfigName"]
self.sagemaker_backend.delete_endpoint_config(config_name)
return ""
def create_endpoint(self):
"""
Handler for the SageMaker "CreateEndpoint" API call documented here:
https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpoint.html.
"""
endpoint_name = self.request_params["EndpointName"]
endpoint_config_name = self.request_params["EndpointConfigName"]
tags = self.request_params.get("Tags", [])
new_endpoint = self.sagemaker_backend.create_endpoint(
endpoint_name=endpoint_name,
endpoint_config_name=endpoint_config_name,
tags=tags,
region_name=self.region,
)
return json.dumps({"EndpointArn": new_endpoint.arn})
def describe_endpoint(self):
"""
Handler for the SageMaker "DescribeEndpoint" API call documented here:
https://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpoint.html.
"""
endpoint_name = self.request_params["EndpointName"]
endpoint_description = self.sagemaker_backend.describe_endpoint(endpoint_name)
return json.dumps(endpoint_description.response_object)
def update_endpoint(self):
"""
Handler for the SageMaker "UpdateEndpoint" API call documented here:
https://docs.aws.amazon.com/sagemaker/latest/dg/API_UpdateEndpoint.html.
"""
endpoint_name = self.request_params["EndpointName"]
new_config_name = self.request_params["EndpointConfigName"]
updated_endpoint = self.sagemaker_backend.update_endpoint(
endpoint_name=endpoint_name, new_config_name=new_config_name
)
return json.dumps({"EndpointArn": updated_endpoint.arn})
def delete_endpoint(self):
"""
Handler for the SageMaker "DeleteEndpoint" API call documented here:
https://docs.aws.amazon.com/sagemaker/latest/dg/API_DeleteEndpoint.html.
"""
endpoint_name = self.request_params["EndpointName"]
self.sagemaker_backend.delete_endpoint(endpoint_name)
return ""
def list_endpoints(self):
"""
Handler for the SageMaker "ListEndpoints" API call documented here:
https://docs.aws.amazon.com/sagemaker/latest/dg/API_ListEndpoints.html.
This function does not support pagination. All endpoint configs are returned in a
single response.
"""
endpoint_summaries = self.sagemaker_backend.list_endpoints()
return json.dumps(
{"Endpoints": [summary.response_object for summary in endpoint_summaries]}
)
def list_endpoint_configs(self):
"""
Handler for the SageMaker "ListEndpointConfigs" API call documented here:
https://docs.aws.amazon.com/sagemaker/latest/dg/API_ListEndpointConfigs.html.
This function does not support pagination. All endpoint configs are returned in a
single response.
"""
# Note:
endpoint_config_summaries = self.sagemaker_backend.list_endpoint_configs()
return json.dumps(
{"EndpointConfigs": [summary.response_object for summary in endpoint_config_summaries]}
)
def list_models(self):
"""
Handler for the SageMaker "ListModels" API call documented here:
https://docs.aws.amazon.com/sagemaker/latest/dg/API_ListModels.html.
This function does not support pagination. All endpoint configs are returned in a
single response.
"""
model_summaries = self.sagemaker_backend.list_models()
return json.dumps({"Models": [summary.response_object for summary in model_summaries]})
def create_model(self):
"""
Handler for the SageMaker "CreateModel" API call documented here:
https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateModel.html.
"""
model_name = self.request_params["ModelName"]
primary_container = self.request_params["PrimaryContainer"]
execution_role_arn = self.request_params["ExecutionRoleArn"]
tags = self.request_params.get("Tags", [])
vpc_config = self.request_params.get("VpcConfig", None)
new_model = self.sagemaker_backend.create_model(
model_name=model_name,
primary_container=primary_container,
execution_role_arn=execution_role_arn,
tags=tags,
vpc_config=vpc_config,
region_name=self.region,
)
return json.dumps({"ModelArn": new_model.arn})
def describe_model(self):
"""
Handler for the SageMaker "DescribeModel" API call documented here:
https://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeModel.html.
"""
model_name = self.request_params["ModelName"]
model_description = self.sagemaker_backend.describe_model(model_name)
return json.dumps(model_description.response_object)
def delete_model(self):
"""
Handler for the SageMaker "DeleteModel" API call documented here:
https://docs.aws.amazon.com/sagemaker/latest/dg/API_DeleteModel.html.
"""
model_name = self.request_params["ModelName"]
self.sagemaker_backend.delete_model(model_name)
return ""
def list_tags(self):
"""
Handler for the SageMaker "ListTags" API call documented here:
https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_ListTags.html
"""
arn = self.request_params["ResourceArn"]
sagemaker_resource = (
"models" if "model" in arn else "endpoints" if "endpoint" in arn else None
)
results = self.sagemaker_backend.list_tags(
resource_arn=arn, region_name=self.region, resource_type=sagemaker_resource
)
return json.dumps({"Tags": results, "NextToken": None})
def create_transform_job(self):
"""
Handler for the SageMaker "CreateTransformJob" API call documented here:
https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreateTransformJob.html.
"""
job_name = self.request_params["TransformJobName"]
model_name = self.request_params.get("ModelName")
transform_input = self.request_params.get("TransformInput")
transform_output = self.request_params.get("TransformOutput")
transform_resources = self.request_params.get("TransformResources")
data_processing = self.request_params.get("DataProcessing")
tags = self.request_params.get("Tags", [])
new_job = self.sagemaker_backend.create_transform_job(
job_name=job_name,
model_name=model_name,
transform_input=transform_input,
transform_output=transform_output,
transform_resources=transform_resources,
data_processing=data_processing,
tags=tags,
region_name=self.region,
)
return json.dumps({"TransformJobArn": new_job.arn})
def stop_transform_job(self):
"""
Handler for the SageMaker "StopTransformJob" API call documented here:
https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_StopTransformJob.html.
"""
job_name = self.request_params["TransformJobName"]
self.sagemaker_backend.stop_transform_job(job_name)
return ""
def describe_transform_job(self):
"""
Handler for the SageMaker "DescribeTransformJob" API call documented here:
https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DescribeTransformJob.html.
"""
job_name = self.request_params["TransformJobName"]
transform_job_description = self.sagemaker_backend.describe_transform_job(job_name)
return json.dumps(transform_job_description.response_object)
def list_transform_jobs(self):
"""
Handler for the SageMaker "ListTransformJobs" API call documented here:
https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_ListTransformJobs.html.
This function does not support pagination. All transform jobs are returned in a
single response.
"""
transform_job_summaries = self.sagemaker_backend.list_transform_jobs()
return json.dumps(
{
"TransformJobSummaries": [
summary.response_object for summary in transform_job_summaries
]
}
)
| SageMakerResponse |
python | huggingface__transformers | tests/models/switch_transformers/test_modeling_switch_transformers.py | {
"start": 28718,
"end": 32323
} | class ____:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
encoder_seq_length=7,
# For common tests
use_attention_mask=True,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
d_ff=37,
relative_attention_num_buckets=8,
is_training=False,
dropout_rate=0.1,
initializer_factor=0.002,
is_encoder_decoder=False,
eos_token_id=1,
pad_token_id=0,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.encoder_seq_length = encoder_seq_length
# For common tests
self.seq_length = self.encoder_seq_length
self.use_attention_mask = use_attention_mask
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.d_ff = d_ff
self.relative_attention_num_buckets = relative_attention_num_buckets
self.dropout_rate = dropout_rate
self.initializer_factor = initializer_factor
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.is_encoder_decoder = is_encoder_decoder
self.scope = None
self.is_training = is_training
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
config = SwitchTransformersConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
d_ff=self.d_ff,
d_kv=self.hidden_size // self.num_attention_heads,
num_layers=self.num_hidden_layers,
num_heads=self.num_attention_heads,
relative_attention_num_buckets=self.relative_attention_num_buckets,
dropout_rate=self.dropout_rate,
initializer_factor=self.initializer_factor,
eos_token_id=self.eos_token_id,
bos_token_id=self.pad_token_id,
pad_token_id=self.pad_token_id,
is_encoder_decoder=self.is_encoder_decoder,
)
return config, input_ids, attention_mask
def create_and_check_model(self, config, input_ids, attention_mask):
model = SwitchTransformersEncoderModel(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids=input_ids,
attention_mask=attention_mask,
)
result = model(input_ids=input_ids)
encoder_output = result.last_hidden_state
self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
def create_and_check_model_fp16_forward(self, config, input_ids, attention_mask):
model = SwitchTransformersEncoderModel(config=config).to(torch_device).half().eval()
output = model(input_ids, attention_mask=attention_mask)["last_hidden_state"]
self.parent.assertFalse(torch.isnan(output).any().item())
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
| SwitchTransformersEncoderOnlyModelTester |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1000625,
"end": 1001552
} | class ____(sgqlc.types.Type):
"""Represents a user who is a member of a team."""
__schema__ = github_schema
__field_names__ = ("cursor", "member_access_resource_path", "member_access_url", "node", "role")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
member_access_resource_path = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="memberAccessResourcePath")
"""The HTTP path to the organization's member access page."""
member_access_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="memberAccessUrl")
"""The HTTP URL to the organization's member access page."""
node = sgqlc.types.Field(sgqlc.types.non_null("User"), graphql_name="node")
role = sgqlc.types.Field(sgqlc.types.non_null(TeamMemberRole), graphql_name="role")
"""The role the member has on the team."""
| TeamMemberEdge |
python | sympy__sympy | sympy/stats/stochastic_process_types.py | {
"start": 12614,
"end": 32928
} | class ____(StochasticProcess):
"""
Contains methods that handle queries
common to Markov processes.
"""
@property
def number_of_states(self) -> Integer | Symbol:
"""
The number of states in the Markov Chain.
"""
return _sympify(self.args[2].shape[0]) # type: ignore
@property
def _state_index(self):
"""
Returns state index as Range.
"""
return self.args[1]
@classmethod
def _sanity_checks(cls, state_space, trans_probs):
# Try to never have None as state_space or trans_probs.
# This helps a lot if we get it done at the start.
if (state_space is None) and (trans_probs is None):
_n = Dummy('n', integer=True, nonnegative=True)
state_space = _state_converter(Range(_n))
trans_probs = _matrix_checks(MatrixSymbol('_T', _n, _n))
elif state_space is None:
trans_probs = _matrix_checks(trans_probs)
state_space = _state_converter(Range(trans_probs.shape[0]))
elif trans_probs is None:
state_space = _state_converter(state_space)
if isinstance(state_space, Range):
_n = ceiling((state_space.stop - state_space.start) / state_space.step)
else:
_n = len(state_space)
trans_probs = MatrixSymbol('_T', _n, _n)
else:
state_space = _state_converter(state_space)
trans_probs = _matrix_checks(trans_probs)
# Range object doesn't want to give a symbolic size
# so we do it ourselves.
if isinstance(state_space, Range):
ss_size = ceiling((state_space.stop - state_space.start) / state_space.step)
else:
ss_size = len(state_space)
if ss_size != trans_probs.shape[0]:
raise ValueError('The size of the state space and the number of '
'rows of the transition matrix must be the same.')
return state_space, trans_probs
def _extract_information(self, given_condition):
"""
Helper function to extract information, like,
transition matrix/generator matrix, state space, etc.
"""
if isinstance(self, DiscreteMarkovChain):
trans_probs = self.transition_probabilities
state_index = self._state_index
elif isinstance(self, ContinuousMarkovChain):
trans_probs = self.generator_matrix
state_index = self._state_index
if isinstance(given_condition, And):
gcs = given_condition.args
given_condition = S.true
for gc in gcs:
if isinstance(gc, TransitionMatrixOf):
trans_probs = gc.matrix
if isinstance(gc, StochasticStateSpaceOf):
state_index = gc.state_index
if isinstance(gc, Relational):
given_condition = given_condition & gc
if isinstance(given_condition, TransitionMatrixOf):
trans_probs = given_condition.matrix
given_condition = S.true
if isinstance(given_condition, StochasticStateSpaceOf):
state_index = given_condition.state_index
given_condition = S.true
return trans_probs, state_index, given_condition
def _check_trans_probs(self, trans_probs, row_sum=1):
"""
Helper function for checking the validity of transition
probabilities.
"""
if not isinstance(trans_probs, MatrixSymbol):
rows = trans_probs.tolist()
for row in rows:
if (sum(row) - row_sum) != 0:
raise ValueError("Values in a row must sum to %s. "
"If you are using Float or floats then please use Rational."%(row_sum))
def _work_out_state_index(self, state_index, given_condition, trans_probs):
"""
Helper function to extract state space if there
is a random symbol in the given condition.
"""
# if given condition is None, then there is no need to work out
# state_space from random variables
if given_condition != None:
rand_var = list(given_condition.atoms(RandomSymbol) -
given_condition.atoms(RandomIndexedSymbol))
if len(rand_var) == 1:
state_index = rand_var[0].pspace.set
# `not None` is `True`. So the old test fails for symbolic sizes.
# Need to build the statement differently.
sym_cond = not self.number_of_states.is_Integer
cond1 = not sym_cond and len(state_index) != trans_probs.shape[0]
if cond1:
raise ValueError("state space is not compatible with the transition probabilities.")
if not isinstance(trans_probs.shape[0], Symbol):
state_index = FiniteSet(*range(trans_probs.shape[0]))
return state_index
@cacheit
def _preprocess(self, given_condition, evaluate):
"""
Helper function for pre-processing the information.
"""
is_insufficient = False
if not evaluate: # avoid pre-processing if the result is not to be evaluated
return (True, None, None, None)
# extracting transition matrix and state space
trans_probs, state_index, given_condition = self._extract_information(given_condition)
# given_condition does not have sufficient information
# for computations
if trans_probs is None or \
given_condition is None:
is_insufficient = True
else:
# checking transition probabilities
if isinstance(self, DiscreteMarkovChain):
self._check_trans_probs(trans_probs, row_sum=1)
elif isinstance(self, ContinuousMarkovChain):
self._check_trans_probs(trans_probs, row_sum=0)
# working out state space
state_index = self._work_out_state_index(state_index, given_condition, trans_probs)
return is_insufficient, trans_probs, state_index, given_condition
def replace_with_index(self, condition):
if isinstance(condition, Relational):
lhs, rhs = condition.lhs, condition.rhs
if not isinstance(lhs, RandomIndexedSymbol):
lhs, rhs = rhs, lhs
condition = type(condition)(self.index_of.get(lhs, lhs),
self.index_of.get(rhs, rhs))
return condition
def probability(self, condition, given_condition=None, evaluate=True, **kwargs):
"""
Handles probability queries for Markov process.
Parameters
==========
condition: Relational
given_condition: Relational/And
Returns
=======
Probability
If the information is not sufficient.
Expr
In all other cases.
Note
====
Any information passed at the time of query overrides
any information passed at the time of object creation like
transition probabilities, state space.
Pass the transition matrix using TransitionMatrixOf,
generator matrix using GeneratorMatrixOf and state space
using StochasticStateSpaceOf in given_condition using & or And.
"""
check, mat, state_index, new_given_condition = \
self._preprocess(given_condition, evaluate)
rv = list(condition.atoms(RandomIndexedSymbol))
symbolic = False
for sym in rv:
if sym.key.is_symbol:
symbolic = True
break
if check:
return Probability(condition, new_given_condition)
if isinstance(self, ContinuousMarkovChain):
trans_probs = self.transition_probabilities(mat)
elif isinstance(self, DiscreteMarkovChain):
trans_probs = mat
condition = self.replace_with_index(condition)
given_condition = self.replace_with_index(given_condition)
new_given_condition = self.replace_with_index(new_given_condition)
if isinstance(condition, Relational):
if isinstance(new_given_condition, And):
gcs = new_given_condition.args
else:
gcs = (new_given_condition, )
min_key_rv = list(new_given_condition.atoms(RandomIndexedSymbol))
if len(min_key_rv):
min_key_rv = min_key_rv[0]
for r in rv:
if min_key_rv.key.is_symbol or r.key.is_symbol:
continue
if min_key_rv.key > r.key:
return Probability(condition)
else:
min_key_rv = None
return Probability(condition)
if symbolic:
return self._symbolic_probability(condition, new_given_condition, rv, min_key_rv)
if len(rv) > 1:
rv[0] = condition.lhs
rv[1] = condition.rhs
if rv[0].key < rv[1].key:
rv[0], rv[1] = rv[1], rv[0]
if isinstance(condition, Gt):
condition = Lt(condition.lhs, condition.rhs)
elif isinstance(condition, Lt):
condition = Gt(condition.lhs, condition.rhs)
elif isinstance(condition, Ge):
condition = Le(condition.lhs, condition.rhs)
elif isinstance(condition, Le):
condition = Ge(condition.lhs, condition.rhs)
s = Rational(0, 1)
n = len(self.state_space)
if isinstance(condition, (Eq, Ne)):
for i in range(0, n):
s += self.probability(Eq(rv[0], i), Eq(rv[1], i)) * self.probability(Eq(rv[1], i), new_given_condition)
return s if isinstance(condition, Eq) else 1 - s
else:
upper = 0
greater = False
if isinstance(condition, (Ge, Lt)):
upper = 1
if isinstance(condition, (Ge, Gt)):
greater = True
for i in range(0, n):
if i <= n//2:
for j in range(0, i + upper):
s += self.probability(Eq(rv[0], i), Eq(rv[1], j)) * self.probability(Eq(rv[1], j), new_given_condition)
else:
s += self.probability(Eq(rv[0], i), new_given_condition)
for j in range(i + upper, n):
s -= self.probability(Eq(rv[0], i), Eq(rv[1], j)) * self.probability(Eq(rv[1], j), new_given_condition)
return s if greater else 1 - s
rv = rv[0]
states = condition.as_set()
prob, gstate = {}, None
for gc in gcs:
if gc.has(min_key_rv):
if gc.has(Probability):
p, gp = (gc.rhs, gc.lhs) if isinstance(gc.lhs, Probability) \
else (gc.lhs, gc.rhs)
gr = gp.args[0]
gset = Intersection(gr.as_set(), state_index)
gstate = list(gset)[0]
prob[gset] = p
else:
_, gstate = (gc.lhs.key, gc.rhs) if isinstance(gc.lhs, RandomIndexedSymbol) \
else (gc.rhs.key, gc.lhs)
if not all(k in self.index_set for k in (rv.key, min_key_rv.key)):
raise IndexError("The timestamps of the process are not in it's index set.")
states = Intersection(states, state_index) if not isinstance(self.number_of_states, Symbol) else states
for state in Union(states, FiniteSet(gstate)):
if not state.is_Integer or Ge(state, mat.shape[0]) is True:
raise IndexError("No information is available for (%s, %s) in "
"transition probabilities of shape, (%s, %s). "
"State space is zero indexed."
%(gstate, state, mat.shape[0], mat.shape[1]))
if prob:
gstates = Union(*prob.keys())
if len(gstates) == 1:
gstate = list(gstates)[0]
gprob = list(prob.values())[0]
prob[gstates] = gprob
elif len(gstates) == len(state_index) - 1:
gstate = list(state_index - gstates)[0]
gprob = S.One - sum(prob.values())
prob[state_index - gstates] = gprob
else:
raise ValueError("Conflicting information.")
else:
gprob = S.One
if min_key_rv == rv:
return sum(prob[FiniteSet(state)] for state in states)
if isinstance(self, ContinuousMarkovChain):
return gprob * sum(trans_probs(rv.key - min_key_rv.key).__getitem__((gstate, state))
for state in states)
if isinstance(self, DiscreteMarkovChain):
return gprob * sum((trans_probs**(rv.key - min_key_rv.key)).__getitem__((gstate, state))
for state in states)
if isinstance(condition, Not):
expr = condition.args[0]
return S.One - self.probability(expr, given_condition, evaluate, **kwargs)
if isinstance(condition, And):
compute_later, state2cond, conds = [], {}, condition.args
for expr in conds:
if isinstance(expr, Relational):
ris = list(expr.atoms(RandomIndexedSymbol))[0]
if state2cond.get(ris, None) is None:
state2cond[ris] = S.true
state2cond[ris] &= expr
else:
compute_later.append(expr)
ris = []
for ri in state2cond:
ris.append(ri)
cset = Intersection(state2cond[ri].as_set(), state_index)
if len(cset) == 0:
return S.Zero
state2cond[ri] = cset.as_relational(ri)
sorted_ris = sorted(ris, key=lambda ri: ri.key)
prod = self.probability(state2cond[sorted_ris[0]], given_condition, evaluate, **kwargs)
for i in range(1, len(sorted_ris)):
ri, prev_ri = sorted_ris[i], sorted_ris[i-1]
if not isinstance(state2cond[ri], Eq):
raise ValueError("The process is in multiple states at %s, unable to determine the probability."%(ri))
mat_of = TransitionMatrixOf(self, mat) if isinstance(self, DiscreteMarkovChain) else GeneratorMatrixOf(self, mat)
prod *= self.probability(state2cond[ri], state2cond[prev_ri]
& mat_of
& StochasticStateSpaceOf(self, state_index),
evaluate, **kwargs)
for expr in compute_later:
prod *= self.probability(expr, given_condition, evaluate, **kwargs)
return prod
if isinstance(condition, Or):
return sum(self.probability(expr, given_condition, evaluate, **kwargs)
for expr in condition.args)
raise NotImplementedError("Mechanism for handling (%s, %s) queries hasn't been "
"implemented yet."%(condition, given_condition))
def _symbolic_probability(self, condition, new_given_condition, rv, min_key_rv):
#Function to calculate probability for queries with symbols
if isinstance(condition, Relational):
curr_state = new_given_condition.rhs if isinstance(new_given_condition.lhs, RandomIndexedSymbol) \
else new_given_condition.lhs
next_state = condition.rhs if isinstance(condition.lhs, RandomIndexedSymbol) \
else condition.lhs
if isinstance(condition, (Eq, Ne)):
if isinstance(self, DiscreteMarkovChain):
P = self.transition_probabilities**(rv[0].key - min_key_rv.key)
else:
P = exp(self.generator_matrix*(rv[0].key - min_key_rv.key))
prob = P[curr_state, next_state] if isinstance(condition, Eq) else 1 - P[curr_state, next_state]
return Piecewise((prob, rv[0].key > min_key_rv.key), (Probability(condition), True))
else:
upper = 1
greater = False
if isinstance(condition, (Ge, Lt)):
upper = 0
if isinstance(condition, (Ge, Gt)):
greater = True
k = Dummy('k')
condition = Eq(condition.lhs, k) if isinstance(condition.lhs, RandomIndexedSymbol)\
else Eq(condition.rhs, k)
total = Sum(self.probability(condition, new_given_condition), (k, next_state + upper, self.state_space._sup))
return Piecewise((total, rv[0].key > min_key_rv.key), (Probability(condition), True)) if greater\
else Piecewise((1 - total, rv[0].key > min_key_rv.key), (Probability(condition), True))
else:
return Probability(condition, new_given_condition)
def expectation(self, expr, condition=None, evaluate=True, **kwargs):
"""
Handles expectation queries for markov process.
Parameters
==========
expr: RandomIndexedSymbol, Relational, Logic
Condition for which expectation has to be computed. Must
contain a RandomIndexedSymbol of the process.
condition: Relational, Logic
The given conditions under which computations should be done.
Returns
=======
Expectation
Unevaluated object if computations cannot be done due to
insufficient information.
Expr
In all other cases when the computations are successful.
Note
====
Any information passed at the time of query overrides
any information passed at the time of object creation like
transition probabilities, state space.
Pass the transition matrix using TransitionMatrixOf,
generator matrix using GeneratorMatrixOf and state space
using StochasticStateSpaceOf in given_condition using & or And.
"""
check, mat, state_index, condition = \
self._preprocess(condition, evaluate)
if check:
return Expectation(expr, condition)
rvs = random_symbols(expr)
if isinstance(expr, Expr) and isinstance(condition, Eq) \
and len(rvs) == 1:
# handle queries similar to E(f(X[i]), Eq(X[i-m], <some-state>))
condition=self.replace_with_index(condition)
state_index=self.replace_with_index(state_index)
rv = list(rvs)[0]
lhsg, rhsg = condition.lhs, condition.rhs
if not isinstance(lhsg, RandomIndexedSymbol):
lhsg, rhsg = (rhsg, lhsg)
if rhsg not in state_index:
raise ValueError("%s state is not in the state space."%(rhsg))
if rv.key < lhsg.key:
raise ValueError("Incorrect given condition is given, expectation "
"time %s < time %s"%(rv.key, rv.key))
mat_of = TransitionMatrixOf(self, mat) if isinstance(self, DiscreteMarkovChain) else GeneratorMatrixOf(self, mat)
cond = condition & mat_of & \
StochasticStateSpaceOf(self, state_index)
func = lambda s: self.probability(Eq(rv, s), cond) * expr.subs(rv, self._state_index[s])
return sum(func(s) for s in state_index)
raise NotImplementedError("Mechanism for handling (%s, %s) queries hasn't been "
"implemented yet."%(expr, condition))
| MarkovProcess |
python | kamyu104__LeetCode-Solutions | Python/boundary-of-binary-tree.py | {
"start": 29,
"end": 1294
} | class ____(object):
def boundaryOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
def leftBoundary(root, nodes):
if not root or (not root.left and not root.right):
return
nodes.append(root.val)
if not root.left:
leftBoundary(root.right, nodes)
else:
leftBoundary(root.left, nodes)
def rightBoundary(root, nodes):
if not root or (not root.left and not root.right):
return
if not root.right:
rightBoundary(root.left, nodes)
else:
rightBoundary(root.right, nodes)
nodes.append(root.val)
def leaves(root, nodes):
if not root:
return
if not root.left and not root.right:
nodes.append(root.val)
return
leaves(root.left, nodes)
leaves(root.right, nodes)
if not root:
return []
nodes = [root.val]
leftBoundary(root.left, nodes)
leaves(root.left, nodes)
leaves(root.right, nodes)
rightBoundary(root.right, nodes)
return nodes
| Solution |
python | geekcomputers__Python | LinkedLists all Types/singly_linked_list.py | {
"start": 518,
"end": 626
} | class ____:
def __init__(self, val=None, next=None):
self.data = val
self.next = next
| Node |
python | pydata__xarray | asv_bench/benchmarks/dataset_io.py | {
"start": 19246,
"end": 19603
} | class ____:
timeout = 60
repeat = 1
number = 5
def setup(self):
# TODO: Lazily skipped in CI as it is very demanding and slow.
# Improve times and remove errors.
_skip_slow()
requires_dask()
self.write = create_delayed_write()
def time_write(self):
self.write.compute()
| IOWriteNetCDFDask |
python | astropy__astropy | astropy/io/registry/tests/test_registries.py | {
"start": 1621,
"end": 2818
} | class ____:
"""A different class with different I/O"""
read = classmethod(io_registry.read)
write = io_registry.write
def empty_reader(*args, **kwargs):
return EmptyData()
def empty_writer(table, *args, **kwargs):
return "status: success"
def empty_identifier(*args, **kwargs):
return True
@pytest.fixture
def fmtcls1():
return ("test1", EmptyData)
@pytest.fixture
def fmtcls2():
return ("test2", EmptyData)
@pytest.fixture(params=["test1", "test2"])
def fmtcls(request):
return (request.param, EmptyData)
@pytest.fixture
def original():
ORIGINAL = {}
ORIGINAL["readers"] = deepcopy(default_registry._readers)
ORIGINAL["writers"] = deepcopy(default_registry._writers)
ORIGINAL["identifiers"] = deepcopy(default_registry._identifiers)
return ORIGINAL
###############################################################################
def test_fmcls1_fmtcls2(fmtcls1, fmtcls2):
"""Just check a fact that we rely on in other tests."""
assert fmtcls1[1] is fmtcls2[1]
def test_IORegistryError():
with pytest.raises(IORegistryError, match="just checking"):
raise IORegistryError("just checking")
| OtherEmptyData |
python | pytorch__pytorch | torch/nn/attention/flex_attention.py | {
"start": 2802,
"end": 9603
} | class ____(TypedDict, total=False):
"""Options for controlling the behavior of FlexAttention kernels.
These options are passed to the underlying Triton kernels to control performance
and numerical behavior. Most users will not need to specify these options as the
default autotuning provides good performance.
The options can be prefixed with ``fwd_`` or ``bwd_`` to apply only to forward or
backward pass respectively. For example: ``fwd_BLOCK_M`` and ``bwd_BLOCK_M1``.
Note:
We currently do not provide any backward compatibility guarantees for these options.
That being said most of these have remained pretty stable since their introduction. But
We do not consider this part of the public API just yet. We think that some documentation
Is better than secret hidden flags, but we may change these options in the future.
Example Usage:
.. code-block:: python
# Using dictionary (backward compatible)
kernel_opts = {"BLOCK_M": 64, "BLOCK_N": 64, "PRESCALE_QK": True}
output = flex_attention(q, k, v, kernel_options=kernel_opts)
# Using TypedDict (recommended for type safety)
from torch.nn.attention.flex_attention import FlexKernelOptions
kernel_opts: FlexKernelOptions = {
"BLOCK_M": 64,
"BLOCK_N": 64,
"PRESCALE_QK": True,
}
output = flex_attention(q, k, v, kernel_options=kernel_opts)
# Forward/backward specific options
kernel_opts: FlexKernelOptions = {
"fwd_BLOCK_M": 64,
"bwd_BLOCK_M1": 32,
"PRESCALE_QK": False,
}
output = flex_attention(q, k, v, kernel_options=kernel_opts)
"""
# Performance tuning options
# pyrefly: ignore [invalid-annotation]
num_warps: NotRequired[int]
"""Number of warps to use in the CUDA kernel. Higher values may improve performance
but increase register pressure. Default is determined by autotuning."""
# pyrefly: ignore [invalid-annotation]
num_stages: NotRequired[int]
"""Number of pipeline stages in the CUDA kernel. Higher values may improve performance
but increase shared memory usage. Default is determined by autotuning."""
# pyrefly: ignore [invalid-annotation]
BLOCK_M: NotRequired[int]
"""Thread block size for the sequence length dimension of Q in forward pass.
Must be a power of 2. Common values: 16, 32, 64, 128. Default is determined by autotuning."""
# pyrefly: ignore [invalid-annotation]
BLOCK_N: NotRequired[int]
"""Thread block size for the sequence length dimension of K/V in forward pass.
Must be a power of 2. Common values: 16, 32, 64, 128. Default is determined by autotuning."""
# Backward-specific block sizes (when prefixed with 'bwd_')
# pyrefly: ignore [invalid-annotation]
BLOCK_M1: NotRequired[int]
"""Thread block size for Q dimension in backward pass. Use as 'bwd_BLOCK_M1'.
Default is determined by autotuning."""
# pyrefly: ignore [invalid-annotation]
BLOCK_N1: NotRequired[int]
"""Thread block size for K/V dimension in backward pass. Use as 'bwd_BLOCK_N1'.
Default is determined by autotuning."""
# pyrefly: ignore [invalid-annotation]
BLOCK_M2: NotRequired[int]
"""Thread block size for second Q dimension in backward pass. Use as 'bwd_BLOCK_M2'.
Default is determined by autotuning."""
# pyrefly: ignore [invalid-annotation]
BLOCK_N2: NotRequired[int]
"""Thread block size for second K/V dimension in backward pass. Use as 'bwd_BLOCK_N2'.
Default is determined by autotuning."""
# pyrefly: ignore [invalid-annotation]
PRESCALE_QK: NotRequired[bool]
"""Whether to pre-scale QK by 1/sqrt(d) and change of base. This is slightly faster but
may have more numerical error. Default: False."""
# pyrefly: ignore [invalid-annotation]
ROWS_GUARANTEED_SAFE: NotRequired[bool]
"""If True, guarantees that at least one value in each row is not masked out.
Allows skipping safety checks for better performance. Only set this if you are certain
your mask guarantees this property. For example, causal attention is guaranteed safe
because each query has at least 1 key-value to attend to. Default: False."""
# pyrefly: ignore [invalid-annotation]
BLOCKS_ARE_CONTIGUOUS: NotRequired[bool]
"""If True, guarantees that all blocks in the mask are contiguous.
Allows optimizing block traversal. For example, causal masks would satisfy this,
but prefix_lm + sliding window would not. Default: False."""
# pyrefly: ignore [invalid-annotation]
WRITE_DQ: NotRequired[bool]
"""Controls whether gradient scatters are done in the DQ iteration loop of the backward pass.
Setting this to False will force this to happen in the DK loop which depending on your
specific score_mod and mask_mod might be faster. Default: True."""
# pyrefly: ignore [invalid-annotation]
FORCE_USE_FLEX_ATTENTION: NotRequired[bool]
"""If True, forces the use of the flex attention kernel instead of potentially using
the more optimized flex-decoding kernel for short sequences. This can be a helpful
option for debugging. Default: False."""
# pyrefly: ignore [invalid-annotation]
USE_TMA: NotRequired[bool]
"""Whether to use Tensor Memory Accelerator (TMA) on supported hardware.
This is experimental and may not work on all hardware, currently specific
to NVIDIA GPUs Hopper+. Default: False."""
# ROCm-specific options
# pyrefly: ignore [invalid-annotation]
kpack: NotRequired[int]
"""ROCm-specific kernel packing parameter."""
# pyrefly: ignore [invalid-annotation]
matrix_instr_nonkdim: NotRequired[int]
"""ROCm-specific matrix instruction non-K dimension."""
# pyrefly: ignore [invalid-annotation]
waves_per_eu: NotRequired[int]
"""ROCm-specific waves per execution unit."""
# pyrefly: ignore [invalid-annotation]
BACKEND: NotRequired[_Backend]
"""Selects a specific kernel backend.
Options:
- "AUTO": Use current heuristics (typically Triton-based kernels with
automatic selection between flex_attention and flex_decoding)
- "TRITON": Standard Triton flex_attention kernel
- "TRITON_DECODE": Triton flex_decoding kernel, only available for short sequence lengths with specific configurations
- "FLASH": Experimental: Flash Attention kernel (cute-dsl), user needs to have flash installed
This option cannot be combined with legacy knobs such as ``FORCE_USE_FLEX_ATTENTION``.
Raises an error if the requested backend cannot be used. Default: "AUTO"
"""
| FlexKernelOptions |
python | sqlalchemy__sqlalchemy | test/sql/test_selectable.py | {
"start": 69094,
"end": 79560
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_join_condition_one(self):
m = MetaData()
t1 = Table("t1", m, Column("id", Integer))
t2 = Table(
"t2", m, Column("id", Integer), Column("t1id", ForeignKey("t1.id"))
)
t3 = Table(
"t3",
m,
Column("id", Integer),
Column("t1id", ForeignKey("t1.id")),
Column("t2id", ForeignKey("t2.id")),
)
t4 = Table(
"t4", m, Column("id", Integer), Column("t2id", ForeignKey("t2.id"))
)
t1t2 = t1.join(t2)
t2t3 = t2.join(t3)
for left, right, a_subset, expected in [
(t1, t2, None, t1.c.id == t2.c.t1id),
(t1t2, t3, t2, t1t2.c.t2_id == t3.c.t2id),
(t2t3, t1, t3, t1.c.id == t3.c.t1id),
(t2t3, t4, None, t2t3.c.t2_id == t4.c.t2id),
(t2t3, t4, t3, t2t3.c.t2_id == t4.c.t2id),
(t2t3.join(t1), t4, None, t2t3.c.t2_id == t4.c.t2id),
(t2t3.join(t1), t4, t1, t2t3.c.t2_id == t4.c.t2id),
(t1t2, t2t3, t2, t1t2.c.t2_id == t2t3.c.t3_t2id),
]:
assert expected.compare(
sql_util.join_condition(left, right, a_subset=a_subset)
)
def test_join_condition_two(self):
m = MetaData()
t1 = Table("t1", m, Column("id", Integer))
t2 = Table(
"t2", m, Column("id", Integer), Column("t1id", ForeignKey("t1.id"))
)
t3 = Table(
"t3",
m,
Column("id", Integer),
Column("t1id", ForeignKey("t1.id")),
Column("t2id", ForeignKey("t2.id")),
)
t4 = Table(
"t4", m, Column("id", Integer), Column("t2id", ForeignKey("t2.id"))
)
t5 = Table(
"t5",
m,
Column("t1id1", ForeignKey("t1.id")),
Column("t1id2", ForeignKey("t1.id")),
)
t1t2 = t1.join(t2)
t2t3 = t2.join(t3)
# these are ambiguous, or have no joins
for left, right, a_subset in [
(t1t2, t3, None),
(t2t3, t1, None),
(t1, t4, None),
(t1t2, t2t3, None),
(t5, t1, None),
(
t5.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery(),
t1,
None,
),
]:
assert_raises(
exc.ArgumentError,
sql_util.join_condition,
left,
right,
a_subset=a_subset,
)
def test_join_condition_three(self):
m = MetaData()
t1 = Table("t1", m, Column("id", Integer))
t2 = Table(
"t2",
m,
Column("id", Integer),
Column("t1id", ForeignKey("t1.id")),
)
t3 = Table(
"t3",
m,
Column("id", Integer),
Column("t1id", ForeignKey("t1.id")),
Column("t2id", ForeignKey("t2.id")),
)
t4 = Table(
"t4",
m,
Column("id", Integer),
Column("t2id", ForeignKey("t2.id")),
)
t1t2 = t1.join(t2)
t2t3 = t2.join(t3)
als = t2t3._anonymous_fromclause()
# test join's behavior, including natural
for left, right, expected in [
(t1, t2, t1.c.id == t2.c.t1id),
(t1t2, t3, t1t2.c.t2_id == t3.c.t2id),
(t2t3, t1, t1.c.id == t3.c.t1id),
(t2t3, t4, t2t3.c.t2_id == t4.c.t2id),
(t2t3, t4, t2t3.c.t2_id == t4.c.t2id),
(t2t3.join(t1), t4, t2t3.c.t2_id == t4.c.t2id),
(t2t3.join(t1), t4, t2t3.c.t2_id == t4.c.t2id),
(t1t2, als, t1t2.c.t2_id == als.c.t3_t2id),
]:
assert expected.compare(left.join(right).onclause)
def test_join_condition_four(self):
m = MetaData()
t1 = Table("t1", m, Column("id", Integer))
t2 = Table(
"t2", m, Column("id", Integer), Column("t1id", ForeignKey("t1.id"))
)
t3 = Table(
"t3",
m,
Column("id", Integer),
Column("t1id", ForeignKey("t1.id")),
Column("t2id", ForeignKey("t2.id")),
)
t1t2 = t1.join(t2)
t2t3 = t2.join(t3)
# these are right-nested joins
j = t1t2.join(t2t3)
assert j.onclause.compare(t2.c.id == t3.c.t2id)
self.assert_compile(
j,
"t1 JOIN t2 ON t1.id = t2.t1id JOIN "
"(t2 JOIN t3 ON t2.id = t3.t2id) ON t2.id = t3.t2id",
)
def test_join_condition_five(self):
m = MetaData()
t1 = Table("t1", m, Column("id", Integer))
t2 = Table(
"t2", m, Column("id", Integer), Column("t1id", ForeignKey("t1.id"))
)
t3 = Table(
"t3",
m,
Column("id", Integer),
Column("t1id", ForeignKey("t1.id")),
Column("t2id", ForeignKey("t2.id")),
)
t1t2 = t1.join(t2)
t2t3 = t2.join(t3)
st2t3 = (
t2t3.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
j = t1t2.join(st2t3)
assert j.onclause.compare(t2.c.id == st2t3.c.t3_t2id)
self.assert_compile(
j,
"t1 JOIN t2 ON t1.id = t2.t1id JOIN "
"(SELECT t2.id AS t2_id, t2.t1id AS t2_t1id, "
"t3.id AS t3_id, t3.t1id AS t3_t1id, t3.t2id AS t3_t2id "
"FROM t2 JOIN t3 ON t2.id = t3.t2id) AS anon_1 "
"ON t2.id = anon_1.t3_t2id",
)
def test_join_multiple_equiv_fks(self):
m = MetaData()
t1 = Table("t1", m, Column("id", Integer, primary_key=True))
t2 = Table(
"t2",
m,
Column("t1id", Integer, ForeignKey("t1.id"), ForeignKey("t1.id")),
)
assert sql_util.join_condition(t1, t2).compare(t1.c.id == t2.c.t1id)
def test_join_cond_no_such_unrelated_table(self):
m = MetaData()
# bounding the "good" column with two "bad" ones is so to
# try to get coverage to get the "continue" statements
# in the loop...
t1 = Table(
"t1",
m,
Column("y", Integer, ForeignKey("t22.id")),
Column("x", Integer, ForeignKey("t2.id")),
Column("q", Integer, ForeignKey("t22.id")),
)
t2 = Table("t2", m, Column("id", Integer))
assert sql_util.join_condition(t1, t2).compare(t1.c.x == t2.c.id)
assert sql_util.join_condition(t2, t1).compare(t1.c.x == t2.c.id)
def test_join_cond_no_such_unrelated_column(self):
m = MetaData()
t1 = Table(
"t1",
m,
Column("x", Integer, ForeignKey("t2.id")),
Column("y", Integer, ForeignKey("t3.q")),
)
t2 = Table("t2", m, Column("id", Integer))
Table("t3", m, Column("id", Integer))
assert sql_util.join_condition(t1, t2).compare(t1.c.x == t2.c.id)
assert sql_util.join_condition(t2, t1).compare(t1.c.x == t2.c.id)
def test_join_cond_no_such_unrelated_table_dont_compare_names(self):
m = MetaData()
t1 = Table(
"t1",
m,
Column("y", Integer, ForeignKey("t22.id")),
Column("x", Integer, ForeignKey("t2.id")),
Column("q", Integer, ForeignKey("t22.id")),
)
t2 = Table(
"t2",
m,
Column("id", Integer),
Column("t3id", ForeignKey("t3.id")),
Column("z", ForeignKey("t33.id")),
)
t3 = Table(
"t3", m, Column("id", Integer), Column("q", ForeignKey("t4.id"))
)
j1 = t1.join(t2)
assert sql_util.join_condition(j1, t3).compare(t2.c.t3id == t3.c.id)
def test_join_cond_no_such_unrelated_column_dont_compare_names(self):
m = MetaData()
t1 = Table(
"t1",
m,
Column("x", Integer, ForeignKey("t2.id")),
)
t2 = Table(
"t2",
m,
Column("id", Integer),
Column("t3id", ForeignKey("t3.id")),
Column("q", ForeignKey("t5.q")),
)
t3 = Table(
"t3", m, Column("id", Integer), Column("t4id", ForeignKey("t4.id"))
)
t4 = Table("t4", m, Column("id", Integer))
Table("t5", m, Column("id", Integer))
j1 = t1.join(t2)
j2 = t3.join(t4)
assert sql_util.join_condition(j1, j2).compare(t2.c.t3id == t3.c.id)
def test_join_cond_no_such_related_table(self):
m1 = MetaData()
m2 = MetaData()
t1 = Table("t1", m1, Column("x", Integer, ForeignKey("t2.id")))
t2 = Table("t2", m2, Column("id", Integer))
assert_raises_message(
exc.NoReferencedTableError,
"Foreign key associated with column 't1.x' could not find "
"table 't2' with which to generate a foreign key to "
"target column 'id'",
sql_util.join_condition,
t1,
t2,
)
assert_raises_message(
exc.NoReferencedTableError,
"Foreign key associated with column 't1.x' could not find "
"table 't2' with which to generate a foreign key to "
"target column 'id'",
sql_util.join_condition,
t2,
t1,
)
def test_join_cond_no_such_related_column(self):
m = MetaData()
t1 = Table("t1", m, Column("x", Integer, ForeignKey("t2.q")))
t2 = Table("t2", m, Column("id", Integer))
assert_raises_message(
exc.NoReferencedColumnError,
"Could not initialize target column for "
"ForeignKey 't2.q' on table 't1': "
"table 't2' has no column named 'q'",
sql_util.join_condition,
t1,
t2,
)
assert_raises_message(
exc.NoReferencedColumnError,
"Could not initialize target column for "
"ForeignKey 't2.q' on table 't1': "
"table 't2' has no column named 'q'",
sql_util.join_condition,
t2,
t1,
)
| JoinConditionTest |
python | huggingface__transformers | src/transformers/models/megatron_bert/modeling_megatron_bert.py | {
"start": 35721,
"end": 40298
} | class ____(MegatronBertPreTrainedModel, GenerationMixin):
_tied_weights_keys = {
"cls.predictions.decoder.weight": "bert.embeddings.word_embeddings.weight",
"cls.predictions.decoder.bias": "cls.predictions.bias",
}
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `MegatronBertForCausalLM` as a standalone, add `is_decoder=True.`")
self.bert = MegatronBertModel(config, add_pooling_layer=False)
self.cls = MegatronBertOnlyMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
self.cls.predictions.bias = new_embeddings.bias
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[tuple, CausalLMOutputWithCrossAttentions]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]`
Example:
```python
>>> from transformers import AutoTokenizer, MegatronBertForCausalLM, MegatronBertConfig
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("nvidia/megatron-bert-cased-345m")
>>> model = MegatronBertForCausalLM.from_pretrained("nvidia/megatron-bert-cased-345m", is_decoder=True)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.cls(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
@auto_docstring
| MegatronBertForCausalLM |
python | huggingface__transformers | src/transformers/models/falcon_h1/modular_falcon_h1.py | {
"start": 43689,
"end": 54956
} | class ____(FalconH1PreTrainedModel):
def __init__(self, config: FalconH1Config):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
decoder_layers = []
for i in range(config.num_hidden_layers):
decoder_layers.append(FalconH1DecoderLayer(config, layer_idx=i))
self.layers = nn.ModuleList(decoder_layers)
self._attn_implementation = config._attn_implementation
self.final_layernorm = FalconH1RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = FalconH1RotaryEmbedding(config=config)
self.embedding_multiplier = config.embedding_multiplier
self.lm_head_multiplier = config.lm_head_multiplier
self.gradient_checkpointing = False
# Compute the MuP vector once and register it for all layers
mup_vector = compute_mup_vector(config)
for layer in self.layers:
layer.mamba.register_buffer("mup_vector", mup_vector, persistent=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[FalconHybridMambaAttentionDynamicCache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs, # NOOP kwargs, for now
) -> Union[tuple, BaseModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
)
use_cache = False
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embedding_multiplier
hidden_states = inputs_embeds
if use_cache and past_key_values is None:
logger.warning_once(
"FalconH1 requires an initialized `FalconHybridMambaAttentionDynamicCache` to return a cache. None was "
"provided, so no cache will be returned."
)
if cache_position is None:
cache_position = torch.arange(hidden_states.shape[1], device=hidden_states.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = self._update_causal_mask(
attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
)
mamba_mask = self._update_mamba_mask(attention_mask, cache_position)
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for decoder_layer in self.layers:
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = decoder_layer(
hidden_states,
attention_mask=causal_mask,
mamba_attention_mask=mamba_mask,
position_ids=position_ids,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
)
hidden_states = layer_outputs[0]
if output_attentions:
if layer_outputs[1] is not None:
# append attentions only of attention layers. Mamba layers return `None` as the attention weights
all_self_attns += (layer_outputs[1],)
hidden_states = self.final_layernorm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
if past_key_values and not past_key_values.has_previous_state:
past_key_values.has_previous_state = True
next_cache = None if not use_cache else past_key_values
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
def _update_mamba_mask(self, attention_mask, cache_position):
"""
No need for zeroing states when
1. Cached forward
2. Attending to all inputs
"""
mamba_mask = attention_mask
if cache_position[0] > 0 or (attention_mask is not None and torch.all(attention_mask == 1)):
mamba_mask = None
return mamba_mask
def _update_causal_mask(
self,
attention_mask: torch.Tensor,
input_tensor: torch.Tensor,
cache_position: torch.Tensor,
past_key_values: FalconHybridMambaAttentionDynamicCache,
output_attentions: bool,
):
if self.config._attn_implementation == "flash_attention_2":
if attention_mask is not None and 0.0 in attention_mask:
return attention_mask
return None
# For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
# order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
# to infer the attention mask.
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
# When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
if self.config._attn_implementation == "sdpa" and not output_attentions:
if AttentionMaskConverter._ignore_causal_mask_sdpa(
attention_mask,
inputs_embeds=input_tensor,
past_key_values_length=past_seen_tokens,
is_training=self.training,
):
return None
dtype = input_tensor.dtype
sequence_length = input_tensor.shape[1]
target_length = (
attention_mask.shape[-1]
if isinstance(attention_mask, torch.Tensor)
else past_seen_tokens + sequence_length + 1
)
# In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
attention_mask,
sequence_length=sequence_length,
target_length=target_length,
dtype=dtype,
cache_position=cache_position,
batch_size=input_tensor.shape[0],
)
if (
self.config._attn_implementation == "sdpa"
and attention_mask is not None
and attention_mask.device.type in ["cuda", "xpu", "npu"]
and not output_attentions
):
# Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
# using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
# Details: https://github.com/pytorch/pytorch/issues/110213
min_dtype = torch.finfo(dtype).min
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(
attention_mask: torch.Tensor,
sequence_length: int,
target_length: int,
dtype: torch.dtype,
cache_position: torch.Tensor,
batch_size: int,
**kwargs,
):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`torch.Tensor`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
# In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full(
(sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
mask_length = attention_mask.shape[-1]
padding_attention_mask = (attention_mask[:, None, None, :] == attention_mask[:, None, :, None])[
:, :, -sequence_length:, :
].to(dtype)
padding_mask = causal_mask[:, :, :, :mask_length] + padding_attention_mask
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
padding_mask, min_dtype
)
return causal_mask
| FalconH1Model |
python | apache__airflow | providers/google/tests/unit/google/cloud/triggers/test_cloud_composer.py | {
"start": 5806,
"end": 6852
} | class ____:
def test_serialize(self, dag_run_trigger):
actual_data = dag_run_trigger.serialize()
expected_data = (
"airflow.providers.google.cloud.triggers.cloud_composer.CloudComposerDAGRunTrigger",
{
"project_id": TEST_PROJECT_ID,
"region": TEST_LOCATION,
"environment_id": TEST_ENVIRONMENT_ID,
"composer_dag_id": TEST_COMPOSER_DAG_ID,
"composer_dag_run_id": TEST_COMPOSER_DAG_RUN_ID,
"start_date": TEST_START_DATE,
"end_date": TEST_END_DATE,
"allowed_states": TEST_ALLOWED_STATES,
"gcp_conn_id": TEST_GCP_CONN_ID,
"impersonation_chain": TEST_IMPERSONATION_CHAIN,
"poll_interval": TEST_POLL_INTERVAL,
"composer_airflow_version": TEST_COMPOSER_AIRFLOW_VERSION,
"use_rest_api": TEST_USE_REST_API,
},
)
assert actual_data == expected_data
| TestCloudComposerDAGRunTrigger |
python | cython__cython | Demos/benchmarks/bm_raytrace.py | {
"start": 4368,
"end": 4701
} | class ____(object):
def __init__(self, point, vector):
self.point = point
self.vector = vector.normalized()
def __repr__(self):
return 'Ray(%s,%s)' % (repr(self.point), repr(self.vector))
def pointAtTime(self, t):
return self.point + self.vector.scale(t)
Point.ZERO = Point(0, 0, 0)
| Ray |
python | django-guardian__django-guardian | example_project/articles/models.py | {
"start": 1071,
"end": 1445
} | class ____(UserObjectPermissionAbstract):
id = models.BigAutoField(editable=False, unique=True, primary_key=True)
class Meta(UserObjectPermissionAbstract.Meta):
abstract = False
indexes = [
*UserObjectPermissionAbstract.Meta.indexes,
models.Index(fields=["content_type", "object_pk", "user"]),
]
| BigUserObjectPermission |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_length/invalid_length_hint_returned.py | {
"start": 676,
"end": 843
} | class ____:
""" __length_hint__ returns a negative integer """
def __length_hint__(self): # [invalid-length-hint-returned]
return -1
| FirstBadLengthHint |
python | falconry__falcon | falcon/_typing.py | {
"start": 4448,
"end": 5521
} | class ____(Protocol):
async def __call__(
self, req: AsgiRequest, ws: WebSocket, **kwargs: Any
) -> None: ...
AsgiReceive = Callable[[], Awaitable['AsgiEvent']]
AsgiSend = Callable[['AsgiSendMsg'], Awaitable[None]]
AsgiProcessRequestMethod = Callable[['AsgiRequest', 'AsgiResponse'], Awaitable[None]]
AsgiProcessResourceMethod = Callable[
['AsgiRequest', 'AsgiResponse', Optional[Resource], dict[str, Any]], Awaitable[None]
]
AsgiProcessResponseMethod = Callable[
['AsgiRequest', 'AsgiResponse', Optional[Resource], bool], Awaitable[None]
]
AsgiProcessRequestWsMethod = Callable[['AsgiRequest', 'WebSocket'], Awaitable[None]]
AsgiProcessResourceWsMethod = Callable[
['AsgiRequest', 'WebSocket', Optional[Resource], dict[str, Any]], Awaitable[None]
]
ResponseCallbacks = Union[
tuple[Callable[[], None], Literal[False]],
tuple[Callable[[], Awaitable[None]], Literal[True]],
]
# Routing
MethodDict = Union[
dict[str, ResponderCallable],
dict[str, Union[AsgiResponderCallable, AsgiResponderWsCallable]],
]
| AsgiResponderWsCallable |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/black/cases/docstring_no_string_normalization.py | {
"start": 0,
"end": 1766
} | class ____:
'''
A multiline class docstring.
'''
def AnEquallyLonelyMethod(self):
'''
A multiline method docstring'''
pass
def one_function():
'''This is a docstring with a single line of text.'''
pass
def shockingly_the_quotes_are_normalized():
'''This is a multiline docstring.
This is a multiline docstring.
This is a multiline docstring.
'''
pass
def foo():
"""This is a docstring with
some lines of text here
"""
return
def baz():
'''"This" is a string with some
embedded "quotes"'''
return
def poit():
"""
Lorem ipsum dolor sit amet.
Consectetur adipiscing elit:
- sed do eiusmod tempor incididunt ut labore
- dolore magna aliqua
- enim ad minim veniam
- quis nostrud exercitation ullamco laboris nisi
- aliquip ex ea commodo consequat
"""
pass
def under_indent():
"""
These lines are indented in a way that does not
make sense.
"""
pass
def over_indent():
"""
This has a shallow indent
- But some lines are deeper
- And the closing quote is too deep
"""
pass
def single_line():
"""But with a newline after it!
"""
pass
def this():
r"""
'hey ho'
"""
def that():
""" "hey yah" """
def and_that():
"""
"hey yah" """
def and_this():
'''
"hey yah"'''
def believe_it_or_not_this_is_in_the_py_stdlib(): '''
"hey yah"'''
def shockingly_the_quotes_are_normalized_v2():
'''
Docstring Docstring Docstring
'''
pass
def backslash_space():
'\ '
def multiline_backslash_1():
'''
hey\there\
\ '''
def multiline_backslash_2():
'''
hey there \ '''
def multiline_backslash_3():
'''
already escaped \\ '''
| ALonelyClass |
python | pypa__setuptools | setuptools/tests/test_sdist.py | {
"start": 2919,
"end": 28720
} | class ____:
@pytest.fixture(autouse=True)
def source_dir(self, tmpdir):
tmpdir = tmpdir / "project_root"
tmpdir.mkdir()
(tmpdir / 'setup.py').write_text(SETUP_PY, encoding='utf-8')
# Set up the rest of the test package
test_pkg = tmpdir / 'sdist_test'
test_pkg.mkdir()
data_folder = tmpdir / 'd'
data_folder.mkdir()
# *.rst was not included in package_data, so c.rst should not be
# automatically added to the manifest when not under version control
for fname in ['__init__.py', 'a.txt', 'b.txt', 'c.rst']:
touch(test_pkg / fname)
touch(data_folder / 'e.dat')
# C sources are not included by default, but they will be,
# if an extension module uses them as sources or depends
for fname in EXTENSION_SOURCES:
touch(tmpdir / fname)
with tmpdir.as_cwd():
yield tmpdir
def assert_package_data_in_manifest(self, cmd):
manifest = cmd.filelist.files
assert os.path.join('sdist_test', 'a.txt') in manifest
assert os.path.join('sdist_test', 'b.txt') in manifest
assert os.path.join('sdist_test', 'c.rst') not in manifest
assert os.path.join('d', 'e.dat') in manifest
def setup_with_extension(self):
setup_attrs = {**SETUP_ATTRS, 'ext_modules': [EXTENSION]}
dist = Distribution(setup_attrs)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
with quiet():
cmd.run()
return cmd
def test_package_data_in_sdist(self):
"""Regression test for pull request #4: ensures that files listed in
package_data are included in the manifest even if they're not added to
version control.
"""
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
with quiet():
cmd.run()
self.assert_package_data_in_manifest(cmd)
def test_package_data_and_include_package_data_in_sdist(self):
"""
Ensure package_data and include_package_data work
together.
"""
setup_attrs = {**SETUP_ATTRS, 'include_package_data': True}
assert setup_attrs['package_data']
dist = Distribution(setup_attrs)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
with quiet():
cmd.run()
self.assert_package_data_in_manifest(cmd)
def test_extension_sources_in_sdist(self):
"""
Ensure that the files listed in Extension.sources and Extension.depends
are automatically included in the manifest.
"""
cmd = self.setup_with_extension()
self.assert_package_data_in_manifest(cmd)
manifest = cmd.filelist.files
for path in EXTENSION_SOURCES:
assert path in manifest
def test_missing_extension_sources(self):
"""
Similar to test_extension_sources_in_sdist but the referenced files don't exist.
Missing files should not be included in distribution (with no error raised).
"""
for path in EXTENSION_SOURCES:
os.remove(path)
cmd = self.setup_with_extension()
self.assert_package_data_in_manifest(cmd)
manifest = cmd.filelist.files
for path in EXTENSION_SOURCES:
assert path not in manifest
def test_symlinked_extension_sources(self):
"""
Similar to test_extension_sources_in_sdist but the referenced files are
instead symbolic links to project-local files. Referenced file paths
should be included. Symlink targets themselves should NOT be included.
"""
symlinked = []
for path in EXTENSION_SOURCES:
base, ext = os.path.splitext(path)
target = base + "_target." + ext
os.rename(path, target)
symlink_or_skip_test(os.path.basename(target), path)
symlinked.append(target)
cmd = self.setup_with_extension()
self.assert_package_data_in_manifest(cmd)
manifest = cmd.filelist.files
for path in EXTENSION_SOURCES:
assert path in manifest
for path in symlinked:
assert path not in manifest
_INVALID_PATHS = {
"must be relative": lambda: (
os.path.abspath(os.path.join("sdist_test", "f.h"))
),
"can't have `..` segments": lambda: (
os.path.join("sdist_test", "..", "sdist_test", "f.h")
),
"doesn't exist": lambda: (
os.path.join("sdist_test", "this_file_does_not_exist.h")
),
"must be inside the project root": lambda: (
symlink_or_skip_test(
touch(os.path.join("..", "outside_of_project_root.h")),
"symlink.h",
)
),
}
@skip_under_stdlib_distutils
@pytest.mark.parametrize("reason", _INVALID_PATHS.keys())
def test_invalid_extension_depends(self, reason, caplog):
"""
Due to backwards compatibility reasons, `Extension.depends` should accept
invalid/weird paths, but then ignore them when building a sdist.
This test verifies that the source distribution is still built
successfully with such paths, but that instead of adding these paths to
the manifest, we emit an informational message, notifying the user that
the invalid path won't be automatically included.
"""
invalid_path = self._INVALID_PATHS[reason]()
extension = Extension(
name="sdist_test.f",
sources=[],
depends=[invalid_path],
)
setup_attrs = {**SETUP_ATTRS, 'ext_modules': [extension]}
dist = Distribution(setup_attrs)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
with quiet(), caplog.at_level(logging.INFO):
cmd.run()
self.assert_package_data_in_manifest(cmd)
manifest = cmd.filelist.files
assert invalid_path not in manifest
expected_message = [
message
for (logger, level, message) in caplog.record_tuples
if (
logger == "root" #
and level == logging.INFO #
and invalid_path in message #
)
]
assert len(expected_message) == 1
(expected_message,) = expected_message
assert reason in expected_message
def test_custom_build_py(self):
"""
Ensure projects defining custom build_py don't break
when creating sdists (issue #2849)
"""
from distutils.command.build_py import build_py as OrigBuildPy
using_custom_command_guard = mock.Mock()
class CustomBuildPy(OrigBuildPy):
"""
Some projects have custom commands inheriting from `distutils`
"""
def get_data_files(self):
using_custom_command_guard()
return super().get_data_files()
setup_attrs = {**SETUP_ATTRS, 'include_package_data': True}
assert setup_attrs['package_data']
dist = Distribution(setup_attrs)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# Make sure we use the custom command
cmd.cmdclass = {'build_py': CustomBuildPy}
cmd.distribution.cmdclass = {'build_py': CustomBuildPy}
assert cmd.distribution.get_command_class('build_py') == CustomBuildPy
msg = "setuptools instead of distutils"
with quiet(), pytest.warns(SetuptoolsDeprecationWarning, match=msg):
cmd.run()
using_custom_command_guard.assert_called()
self.assert_package_data_in_manifest(cmd)
def test_setup_py_exists(self):
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'foo.py'
cmd = sdist(dist)
cmd.ensure_finalized()
with quiet():
cmd.run()
manifest = cmd.filelist.files
assert 'setup.py' in manifest
def test_setup_py_missing(self):
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'foo.py'
cmd = sdist(dist)
cmd.ensure_finalized()
if os.path.exists("setup.py"):
os.remove("setup.py")
with quiet():
cmd.run()
manifest = cmd.filelist.files
assert 'setup.py' not in manifest
def test_setup_py_excluded(self):
with open("MANIFEST.in", "w", encoding="utf-8") as manifest_file:
manifest_file.write("exclude setup.py")
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'foo.py'
cmd = sdist(dist)
cmd.ensure_finalized()
with quiet():
cmd.run()
manifest = cmd.filelist.files
assert 'setup.py' not in manifest
def test_defaults_case_sensitivity(self, source_dir):
"""
Make sure default files (README.*, etc.) are added in a case-sensitive
way to avoid problems with packages built on Windows.
"""
touch(source_dir / 'readme.rst')
touch(source_dir / 'SETUP.cfg')
dist = Distribution(SETUP_ATTRS)
# the extension deliberately capitalized for this test
# to make sure the actual filename (not capitalized) gets added
# to the manifest
dist.script_name = 'setup.PY'
cmd = sdist(dist)
cmd.ensure_finalized()
with quiet():
cmd.run()
# lowercase all names so we can test in a
# case-insensitive way to make sure the files
# are not included.
manifest = map(lambda x: x.lower(), cmd.filelist.files)
assert 'readme.rst' not in manifest, manifest
assert 'setup.py' not in manifest, manifest
assert 'setup.cfg' not in manifest, manifest
def test_exclude_dev_only_cache_folders(self, source_dir):
included = {
# Emulate problem in https://github.com/pypa/setuptools/issues/4601
"MANIFEST.in": (
"global-include LICEN[CS]E* COPYING* NOTICE* AUTHORS*\n"
"global-include *.txt\n"
),
# For the sake of being conservative and limiting unforeseen side-effects
# we just exclude dev-only cache folders at the root of the repository:
"test/.venv/lib/python3.9/site-packages/bar-2.dist-info/AUTHORS.rst": "",
"src/.nox/py/lib/python3.12/site-packages/bar-2.dist-info/COPYING.txt": "",
"doc/.tox/default/lib/python3.11/site-packages/foo-4.dist-info/LICENSE": "",
# Let's test against false positives with similarly named files:
".venv-requirements.txt": "",
".tox-coveragerc.txt": "",
".noxy/coveragerc.txt": "",
}
excluded = {
# .tox/.nox/.venv are well-know folders present at the root of Python repos
# and therefore should be excluded
".tox/release/lib/python3.11/site-packages/foo-4.dist-info/LICENSE": "",
".nox/py/lib/python3.12/site-packages/bar-2.dist-info/COPYING.txt": "",
".venv/lib/python3.9/site-packages/bar-2.dist-info/AUTHORS.rst": "",
}
for file, content in {**excluded, **included}.items():
Path(source_dir, file).parent.mkdir(parents=True, exist_ok=True)
Path(source_dir, file).write_text(content, encoding="utf-8")
cmd = self.setup_with_extension()
self.assert_package_data_in_manifest(cmd)
manifest = {f.replace(os.sep, '/') for f in cmd.filelist.files}
for path in excluded:
assert os.path.exists(path)
assert path not in manifest, (path, manifest)
for path in included:
assert os.path.exists(path)
assert path in manifest, (path, manifest)
@fail_on_ascii
def test_manifest_is_written_with_utf8_encoding(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
mm = manifest_maker(dist)
mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
os.mkdir('sdist_test.egg-info')
# UTF-8 filename
filename = os.path.join('sdist_test', 'smörbröd.py')
# Must create the file or it will get stripped.
touch(filename)
# Add UTF-8 filename and write manifest
with quiet():
mm.run()
mm.filelist.append(filename)
mm.write_manifest()
contents = read_all_bytes(mm.manifest)
# The manifest should be UTF-8 encoded
u_contents = contents.decode('UTF-8')
# The manifest should contain the UTF-8 filename
assert posix(filename) in u_contents
@fail_on_ascii
def test_write_manifest_allows_utf8_filenames(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
mm = manifest_maker(dist)
mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
os.mkdir('sdist_test.egg-info')
filename = os.path.join(b'sdist_test', Filenames.utf_8)
# Must touch the file or risk removal
touch(filename)
# Add filename and write manifest
with quiet():
mm.run()
u_filename = filename.decode('utf-8')
mm.filelist.files.append(u_filename)
# Re-write manifest
mm.write_manifest()
contents = read_all_bytes(mm.manifest)
# The manifest should be UTF-8 encoded
contents.decode('UTF-8')
# The manifest should contain the UTF-8 filename
assert posix(filename) in contents
# The filelist should have been updated as well
assert u_filename in mm.filelist.files
@skip_under_xdist
def test_write_manifest_skips_non_utf8_filenames(self):
"""
Files that cannot be encoded to UTF-8 (specifically, those that
weren't originally successfully decoded and have surrogate
escapes) should be omitted from the manifest.
See https://bitbucket.org/tarek/distribute/issue/303 for history.
"""
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
mm = manifest_maker(dist)
mm.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
os.mkdir('sdist_test.egg-info')
# Latin-1 filename
filename = os.path.join(b'sdist_test', Filenames.latin_1)
# Add filename with surrogates and write manifest
with quiet():
mm.run()
u_filename = filename.decode('utf-8', 'surrogateescape')
mm.filelist.append(u_filename)
# Re-write manifest
mm.write_manifest()
contents = read_all_bytes(mm.manifest)
# The manifest should be UTF-8 encoded
contents.decode('UTF-8')
# The Latin-1 filename should have been skipped
assert posix(filename) not in contents
# The filelist should have been updated as well
assert u_filename not in mm.filelist.files
@fail_on_ascii
def test_manifest_is_read_with_utf8_encoding(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# Create manifest
with quiet():
cmd.run()
# Add UTF-8 filename to manifest
filename = os.path.join(b'sdist_test', Filenames.utf_8)
cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
manifest = open(cmd.manifest, 'ab')
manifest.write(b'\n' + filename)
manifest.close()
# The file must exist to be included in the filelist
touch(filename)
# Re-read manifest
cmd.filelist.files = []
with quiet():
cmd.read_manifest()
# The filelist should contain the UTF-8 filename
filename = filename.decode('utf-8')
assert filename in cmd.filelist.files
@fail_on_latin1_encoded_filenames
def test_read_manifest_skips_non_utf8_filenames(self):
# Test for #303.
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# Create manifest
with quiet():
cmd.run()
# Add Latin-1 filename to manifest
filename = os.path.join(b'sdist_test', Filenames.latin_1)
cmd.manifest = os.path.join('sdist_test.egg-info', 'SOURCES.txt')
manifest = open(cmd.manifest, 'ab')
manifest.write(b'\n' + filename)
manifest.close()
# The file must exist to be included in the filelist
touch(filename)
# Re-read manifest
cmd.filelist.files = []
with quiet():
cmd.read_manifest()
# The Latin-1 filename should have been skipped
filename = filename.decode('latin-1')
assert filename not in cmd.filelist.files
@fail_on_ascii
@fail_on_latin1_encoded_filenames
def test_sdist_with_utf8_encoded_filename(self):
# Test for #303.
dist = Distribution(self.make_strings(SETUP_ATTRS))
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
filename = os.path.join(b'sdist_test', Filenames.utf_8)
touch(filename)
with quiet():
cmd.run()
if sys.platform == 'darwin':
filename = decompose(filename)
fs_enc = sys.getfilesystemencoding()
if sys.platform == 'win32':
if fs_enc == 'cp1252':
# Python mangles the UTF-8 filename
filename = filename.decode('cp1252')
assert filename in cmd.filelist.files
else:
filename = filename.decode('mbcs')
assert filename in cmd.filelist.files
else:
filename = filename.decode('utf-8')
assert filename in cmd.filelist.files
@classmethod
def make_strings(cls, item):
if isinstance(item, dict):
return {key: cls.make_strings(value) for key, value in item.items()}
if isinstance(item, list):
return list(map(cls.make_strings, item))
return str(item)
@fail_on_latin1_encoded_filenames
@skip_under_xdist
def test_sdist_with_latin1_encoded_filename(self):
# Test for #303.
dist = Distribution(self.make_strings(SETUP_ATTRS))
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
# Latin-1 filename
filename = os.path.join(b'sdist_test', Filenames.latin_1)
touch(filename)
assert os.path.isfile(filename)
with quiet():
cmd.run()
# not all windows systems have a default FS encoding of cp1252
if sys.platform == 'win32':
# Latin-1 is similar to Windows-1252 however
# on mbcs filesys it is not in latin-1 encoding
fs_enc = sys.getfilesystemencoding()
if fs_enc != 'mbcs':
fs_enc = 'latin-1'
filename = filename.decode(fs_enc)
assert filename in cmd.filelist.files
else:
# The Latin-1 filename should have been skipped
filename = filename.decode('latin-1')
assert filename not in cmd.filelist.files
_EXAMPLE_DIRECTIVES = {
"setup.cfg - long_description and version": """
[metadata]
name = testing
version = file: src/VERSION.txt
license_files = DOWHATYOUWANT
long_description = file: README.rst, USAGE.rst
""",
"pyproject.toml - static readme/license files and dynamic version": """
[project]
name = "testing"
readme = "USAGE.rst"
license-files = ["DOWHATYOUWANT"]
dynamic = ["version"]
[tool.setuptools.dynamic]
version = {file = ["src/VERSION.txt"]}
""",
"pyproject.toml - directive with str instead of list": """
[project]
name = "testing"
readme = "USAGE.rst"
license-files = ["DOWHATYOUWANT"]
dynamic = ["version"]
[tool.setuptools.dynamic]
version = {file = "src/VERSION.txt"}
""",
"pyproject.toml - deprecated license table with file entry": """
[project]
name = "testing"
readme = "USAGE.rst"
license = {file = "DOWHATYOUWANT"}
dynamic = ["version"]
[tool.setuptools.dynamic]
version = {file = "src/VERSION.txt"}
""",
}
@pytest.mark.parametrize("config", _EXAMPLE_DIRECTIVES.keys())
@pytest.mark.filterwarnings(
"ignore:.project.license. as a TOML table is deprecated"
)
def test_add_files_referenced_by_config_directives(self, source_dir, config):
config_file, _, _ = config.partition(" - ")
config_text = self._EXAMPLE_DIRECTIVES[config]
(source_dir / 'src').mkdir()
(source_dir / 'src/VERSION.txt').write_text("0.42", encoding="utf-8")
(source_dir / 'README.rst').write_text("hello world!", encoding="utf-8")
(source_dir / 'USAGE.rst').write_text("hello world!", encoding="utf-8")
(source_dir / 'DOWHATYOUWANT').write_text("hello world!", encoding="utf-8")
(source_dir / config_file).write_text(config_text, encoding="utf-8")
dist = Distribution({"packages": []})
dist.script_name = 'setup.py'
dist.parse_config_files()
cmd = sdist(dist)
cmd.ensure_finalized()
with quiet():
cmd.run()
assert (
'src/VERSION.txt' in cmd.filelist.files
or 'src\\VERSION.txt' in cmd.filelist.files
)
assert 'USAGE.rst' in cmd.filelist.files
assert 'DOWHATYOUWANT' in cmd.filelist.files
assert '/' not in cmd.filelist.files
assert '\\' not in cmd.filelist.files
def test_pyproject_toml_in_sdist(self, source_dir):
"""
Check if pyproject.toml is included in source distribution if present
"""
touch(source_dir / 'pyproject.toml')
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
with quiet():
cmd.run()
manifest = cmd.filelist.files
assert 'pyproject.toml' in manifest
def test_pyproject_toml_excluded(self, source_dir):
"""
Check that pyproject.toml can excluded even if present
"""
touch(source_dir / 'pyproject.toml')
with open('MANIFEST.in', 'w', encoding="utf-8") as mts:
print('exclude pyproject.toml', file=mts)
dist = Distribution(SETUP_ATTRS)
dist.script_name = 'setup.py'
cmd = sdist(dist)
cmd.ensure_finalized()
with quiet():
cmd.run()
manifest = cmd.filelist.files
assert 'pyproject.toml' not in manifest
def test_build_subcommand_source_files(self, source_dir):
touch(source_dir / '.myfile~')
# Sanity check: without custom commands file list should not be affected
dist = Distribution({**SETUP_ATTRS, "script_name": "setup.py"})
cmd = sdist(dist)
cmd.ensure_finalized()
with quiet():
cmd.run()
manifest = cmd.filelist.files
assert '.myfile~' not in manifest
# Test: custom command should be able to augment file list
dist = Distribution({**SETUP_ATTRS, "script_name": "setup.py"})
build = dist.get_command_obj("build")
build.sub_commands = [*build.sub_commands, ("build_custom", None)]
class build_custom(Command):
def initialize_options(self): ...
def finalize_options(self): ...
def run(self): ...
def get_source_files(self):
return ['.myfile~']
dist.cmdclass.update(build_custom=build_custom)
cmd = sdist(dist)
cmd.use_defaults = True
cmd.ensure_finalized()
with quiet():
cmd.run()
manifest = cmd.filelist.files
assert '.myfile~' in manifest
@pytest.mark.skipif("os.environ.get('SETUPTOOLS_USE_DISTUTILS') == 'stdlib'")
def test_build_base_pathlib(self, source_dir):
"""
Ensure if build_base is a pathlib.Path, the build still succeeds.
"""
dist = Distribution({
**SETUP_ATTRS,
"script_name": "setup.py",
"options": {"build": {"build_base": pathlib.Path('build')}},
})
cmd = sdist(dist)
cmd.ensure_finalized()
with quiet():
cmd.run()
def test_default_revctrl():
"""
When _default_revctrl was removed from the `setuptools.command.sdist`
module in 10.0, it broke some systems which keep an old install of
setuptools (Distribute) around. Those old versions require that the
setuptools package continue to implement that interface, so this
function provides that interface, stubbed. See #320 for details.
This interface must be maintained until Ubuntu 12.04 is no longer
supported (by Setuptools).
"""
(ep,) = metadata.EntryPoints._from_text(
"""
[setuptools.file_finders]
svn_cvs = setuptools.command.sdist:_default_revctrl
"""
)
res = ep.load()
assert hasattr(res, '__iter__')
| TestSdistTest |
python | pypa__pip | src/pip/_internal/operations/check.py | {
"start": 829,
"end": 5894
} | class ____(NamedTuple):
version: Version
dependencies: list[Requirement]
# Shorthands
PackageSet = dict[NormalizedName, PackageDetails]
Missing = tuple[NormalizedName, Requirement]
Conflicting = tuple[NormalizedName, Version, Requirement]
MissingDict = dict[NormalizedName, list[Missing]]
ConflictingDict = dict[NormalizedName, list[Conflicting]]
CheckResult = tuple[MissingDict, ConflictingDict]
ConflictDetails = tuple[PackageSet, CheckResult]
def create_package_set_from_installed() -> tuple[PackageSet, bool]:
"""Converts a list of distributions into a PackageSet."""
package_set = {}
problems = False
env = get_default_environment()
for dist in env.iter_installed_distributions(local_only=False, skip=()):
name = dist.canonical_name
try:
dependencies = list(dist.iter_dependencies())
package_set[name] = PackageDetails(dist.version, dependencies)
except (OSError, ValueError) as e:
# Don't crash on unreadable or broken metadata.
logger.warning("Error parsing dependencies of %s: %s", name, e)
problems = True
return package_set, problems
def check_package_set(
package_set: PackageSet, should_ignore: Callable[[str], bool] | None = None
) -> CheckResult:
"""Check if a package set is consistent
If should_ignore is passed, it should be a callable that takes a
package name and returns a boolean.
"""
missing = {}
conflicting = {}
for package_name, package_detail in package_set.items():
# Info about dependencies of package_name
missing_deps: set[Missing] = set()
conflicting_deps: set[Conflicting] = set()
if should_ignore and should_ignore(package_name):
continue
for req in package_detail.dependencies:
name = canonicalize_name(req.name)
# Check if it's missing
if name not in package_set:
missed = True
if req.marker is not None:
missed = req.marker.evaluate({"extra": ""})
if missed:
missing_deps.add((name, req))
continue
# Check if there's a conflict
version = package_set[name].version
if not req.specifier.contains(version, prereleases=True):
conflicting_deps.add((name, version, req))
if missing_deps:
missing[package_name] = sorted(missing_deps, key=str)
if conflicting_deps:
conflicting[package_name] = sorted(conflicting_deps, key=str)
return missing, conflicting
def check_install_conflicts(to_install: list[InstallRequirement]) -> ConflictDetails:
"""For checking if the dependency graph would be consistent after \
installing given requirements
"""
# Start from the current state
package_set, _ = create_package_set_from_installed()
# Install packages
would_be_installed = _simulate_installation_of(to_install, package_set)
# Only warn about directly-dependent packages; create a whitelist of them
whitelist = _create_whitelist(would_be_installed, package_set)
return (
package_set,
check_package_set(
package_set, should_ignore=lambda name: name not in whitelist
),
)
def check_unsupported(
packages: Iterable[BaseDistribution],
supported_tags: Iterable[Tag],
) -> Generator[BaseDistribution, None, None]:
for p in packages:
with suppress(FileNotFoundError):
wheel_file = p.read_text("WHEEL")
wheel_tags: frozenset[Tag] = reduce(
frozenset.union,
map(parse_tag, Parser().parsestr(wheel_file).get_all("Tag", [])),
frozenset(),
)
if wheel_tags.isdisjoint(supported_tags):
yield p
def _simulate_installation_of(
to_install: list[InstallRequirement], package_set: PackageSet
) -> set[NormalizedName]:
"""Computes the version of packages after installing to_install."""
# Keep track of packages that were installed
installed = set()
# Modify it as installing requirement_set would (assuming no errors)
for inst_req in to_install:
abstract_dist = make_distribution_for_install_requirement(inst_req)
dist = abstract_dist.get_metadata_distribution()
name = dist.canonical_name
package_set[name] = PackageDetails(dist.version, list(dist.iter_dependencies()))
installed.add(name)
return installed
def _create_whitelist(
would_be_installed: set[NormalizedName], package_set: PackageSet
) -> set[NormalizedName]:
packages_affected = set(would_be_installed)
for package_name in package_set:
if package_name in packages_affected:
continue
for req in package_set[package_name].dependencies:
if canonicalize_name(req.name) in packages_affected:
packages_affected.add(package_name)
break
return packages_affected
| PackageDetails |
python | spyder-ide__spyder | spyder/plugins/ipythonconsole/widgets/main_widget.py | {
"start": 3292,
"end": 3607
} | class ____:
Default = "default_section"
Conda = "conda_section"
Pyenv = "pyenv_section"
Custom = "custom_section"
Other = "other_section"
Submenus = "submenus_section"
# ---- Widgets
# ----------------------------------------------------------------------------
| EnvironmentConsolesMenuSections |
python | django__django | tests/prefetch_related/models.py | {
"start": 2714,
"end": 2769
} | class ____(ModelIterable):
pass
| ModelIterableSubclass |
python | skorch-dev__skorch | skorch/tests/test_hf.py | {
"start": 17689,
"end": 19686
} | class ____(_HuggingfaceTokenizersBaseTest):
vocab_size = 123
@pytest.fixture(scope='module', params=['as string', 'as instance'])
def tokenizer(self, request, data):
# pylint: disable=missing-function-docstring
from transformers import AutoTokenizer
from skorch.hf import HuggingfacePretrainedTokenizer
kwargs = {'train': True, 'vocab_size': self.vocab_size}
if request.param == 'as string':
return HuggingfacePretrainedTokenizer('bert-base-cased', **kwargs).fit(data)
tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
return HuggingfacePretrainedTokenizer(tokenizer, **kwargs).fit(data)
def test_fit_with_generator(self, tokenizer, data):
# does not raise
tokenizer.fit(row for row in data)
def test_vocab_size_argument_honored(self, tokenizer):
vocab_size = len(tokenizer.vocabulary_)
assert vocab_size == self.vocab_size
def test_vocab_size_argument_none(self, data):
# If not set explicitly, the vocab size should be the same one as of the
# original tokenizer. However, for this test, we don't have enough data
# to reach that vocab size (28996). Therefore, we test instead that the
# vocab size is considerably greater than the one seen when we set
# vocab_size explictly.
from transformers import AutoTokenizer
from skorch.hf import HuggingfacePretrainedTokenizer
tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
hf_tokenizer = HuggingfacePretrainedTokenizer(tokenizer, train=True).fit(data)
# The vocab_size is much bigger than in the previous test
vocab_size = len(hf_tokenizer.vocabulary_)
assert vocab_size >= 100 + self.vocab_size
def test_fixed_vocabulary(self, tokenizer):
assert tokenizer.fixed_vocabulary_ is False
# The class is defined on top level so that it can be pickled
| TestHuggingfacePretrainedTokenizerWithFit |
python | has2k1__plotnine | plotnine/scales/scale_color.py | {
"start": 12322,
"end": 13084
} | class ____(_scale_color_discrete):
"""
A discrete color scales using Matplotlib colormaps
See Also
--------
[](`matplotlib.cm`)
[](`matplotlib.colors`)
mizani.palettes.cmap_pal : The palette class that generates
the colours of this scale.
"""
cmap_name: InitVar[str] = "viridis"
"""
A standard Matplotlib colormap name. The default is `viridis`.
For the list of names checkout the output of
`matplotlib.cm.cmap_d.keys()` or see the
`documentation <http://matplotlib.org/users/colormaps.html>`_.
"""
def __post_init__(self, cmap_name):
from mizani.palettes import cmap_d_pal
super().__post_init__()
self.palette = cmap_d_pal(cmap_name)
@dataclass
| scale_color_cmap_d |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/benchmarks/optimize_benchmark.py | {
"start": 952,
"end": 6485
} | class ____(benchmark_base.DatasetBenchmarkBase):
"""Benchmarks for static optimizations."""
def benchmark_map_fusion(self):
"""Evaluates performance map of fusion."""
chain_lengths = [0, 1, 2, 5, 10, 20, 50]
for chain_length in chain_lengths:
self._benchmark_map_fusion(
chain_length=chain_length, optimize_dataset=False)
self._benchmark_map_fusion(
chain_length=chain_length, optimize_dataset=True)
def _benchmark_map_fusion(self, chain_length, optimize_dataset):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(None)
for _ in range(chain_length):
dataset = dataset.map(lambda x: x)
if optimize_dataset:
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_fusion = True
dataset = dataset.with_options(options)
opt_mark = "opt" if optimize_dataset else "noopt"
self.run_and_report_benchmark(
dataset=dataset,
num_elements=100,
iters=10,
warmup=True,
extras={
"model_name": "optimize.benchmark.1",
"parameters": "%d.%s" % (chain_length, optimize_dataset),
},
name="map_fusion_{}_chain_length_{}".format(opt_mark, chain_length))
def benchmark_map_and_filter_fusion(self):
"""Evaluates performance map of fusion."""
chain_lengths = [0, 1, 2, 5, 10, 20, 50]
for chain_length in chain_lengths:
self._benchmark_map_and_filter_fusion(
chain_length=chain_length, optimize_dataset=False)
self._benchmark_map_and_filter_fusion(
chain_length=chain_length, optimize_dataset=True)
def _benchmark_map_and_filter_fusion(self, chain_length, optimize_dataset):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(None)
for _ in range(chain_length):
dataset = dataset.map(lambda x: x + 5).filter(
lambda x: math_ops.greater_equal(x - 5, 0))
if optimize_dataset:
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.map_and_filter_fusion = True
dataset = dataset.with_options(options)
opt_mark = "opt" if optimize_dataset else "noopt"
self.run_and_report_benchmark(
dataset=dataset,
num_elements=100,
iters=10,
warmup=True,
extras={
"model_name": "optimize.benchmark.2",
"parameters": "%d.%s" % (chain_length, optimize_dataset),
},
name="map_and_filter_fusion_{}_chain_length_{}".format(
opt_mark, chain_length))
# This benchmark compares the performance of pipeline with multiple chained
# filter with and without filter fusion.
def benchmark_filter_fusion(self):
chain_lengths = [0, 1, 2, 5, 10, 20, 50]
for chain_length in chain_lengths:
self._benchmark_filter_fusion(
chain_length=chain_length, optimize_dataset=False)
self._benchmark_filter_fusion(
chain_length=chain_length, optimize_dataset=True)
def _benchmark_filter_fusion(self, chain_length, optimize_dataset):
dataset = dataset_ops.Dataset.from_tensors(5).repeat(None)
for _ in range(chain_length):
dataset = dataset.filter(lambda x: math_ops.greater_equal(x - 5, 0))
if optimize_dataset:
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.filter_fusion = True
dataset = dataset.with_options(options)
opt_mark = "opt" if optimize_dataset else "noopt"
self.run_and_report_benchmark(
dataset=dataset,
num_elements=100,
iters=10,
warmup=True,
extras={
"model_name": "optimize.benchmark.3",
"parameters": "%d.%s" % (chain_length, optimize_dataset),
},
name="filter_fusion_{}_chain_length_{}".format(opt_mark, chain_length))
# This benchmark compares the performance of pipeline with multiple chained
# filter with and without filter parallelization.
def benchmark_filter_parallelization(self):
chain_lengths = [0, 1, 2, 5, 10, 20, 50]
for chain_length in chain_lengths:
self._benchmark_filter_parallelization(
chain_length=chain_length, optimize_dataset=False)
self._benchmark_filter_parallelization(
chain_length=chain_length, optimize_dataset=True)
def _benchmark_filter_parallelization(self, chain_length, optimize_dataset):
dataset = dataset_ops.Dataset.from_tensors(5).repeat()
for _ in range(chain_length):
dataset = dataset.filter(lambda x: math_ops.greater_equal(x - 5, 0))
if optimize_dataset:
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_optimization.filter_parallelization = True
dataset = dataset.with_options(options)
opt_mark = "opt" if optimize_dataset else "noopt"
self.run_and_report_benchmark(
dataset=dataset,
num_elements=100,
iters=10,
warmup=True,
extras={
"model_name": "optimize.benchmark.4",
"parameters": "%d.%s" % (chain_length, optimize_dataset),
},
name="filter_parallelization_{}_chain_length_{}".format(opt_mark,
chain_length))
if __name__ == "__main__":
benchmark_base.test.main()
| OptimizationBenchmark |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/slides/image_extractor.py | {
"start": 216,
"end": 4158
} | class ____:
"""
Handles image extraction and captioning for PowerPoint slides.
Uses vision transformer models for image captioning.
"""
def __init__(self):
"""Initialize image extractor with vision models."""
self.vision_models = None
self._initialize_vision_models()
def _initialize_vision_models(self) -> None:
"""Initialize vision transformer models for image captioning."""
try:
import torch # noqa
from PIL import Image # noqa
from transformers import (
AutoTokenizer,
VisionEncoderDecoderModel,
ViTFeatureExtractor,
)
except ImportError:
raise ImportError(
"Missing required dependencies for image extraction and captioning.\n"
"Please install the following packages:\n"
" pip install 'torch>=2.7.1' 'transformers<4.50' 'pillow>=11.2.1'\n\n"
"Note: This feature requires PyTorch and transformers for AI-powered image captioning.\n"
"If you don't need image extraction, set extract_images=False when initializing PptxReader."
)
model = VisionEncoderDecoderModel.from_pretrained(
"nlpconnect/vit-gpt2-image-captioning"
)
feature_extractor = ViTFeatureExtractor.from_pretrained(
"nlpconnect/vit-gpt2-image-captioning"
)
tokenizer = AutoTokenizer.from_pretrained(
"nlpconnect/vit-gpt2-image-captioning"
)
self.vision_models = {
"feature_extractor": feature_extractor,
"model": model,
"tokenizer": tokenizer,
}
def caption_image_from_file(self, image_path: str) -> str:
"""
Generate caption for image from file path.
Args:
image_path: Path to image file
Returns:
Image caption text
"""
if not self.vision_models:
raise RuntimeError(
"Image captioning not available - vision models not loaded"
)
from PIL import Image
from llama_index.core.utils import infer_torch_device
model = self.vision_models["model"]
feature_extractor = self.vision_models["feature_extractor"]
tokenizer = self.vision_models["tokenizer"]
device = infer_torch_device()
model.to(device)
max_length = 16
num_beams = 4
gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
i_image = Image.open(image_path)
if i_image.mode != "RGB":
i_image = i_image.convert(mode="RGB")
pixel_values = feature_extractor(
images=[i_image], return_tensors="pt"
).pixel_values
pixel_values = pixel_values.to(device)
output_ids = model.generate(pixel_values, **gen_kwargs)
preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
return preds[0].strip()
def extract_image_data(self, shape, slide_number: int) -> Dict[str, Any]:
"""
Extract image data and caption from PowerPoint shape.
Args:
shape: PowerPoint shape containing image
slide_number: Slide number for context
Returns:
Dictionary with image metadata and caption
"""
# Use temp file approach like original code
image_bytes = shape.image.blob
f = tempfile.NamedTemporaryFile(
"wb", delete=False, suffix=f".{shape.image.ext}"
)
try:
f.write(image_bytes)
f.close()
caption = self.caption_image_from_file(f.name)
finally:
os.unlink(f.name)
return {
"type": "image",
"format": shape.image.ext,
"caption": caption,
"content": caption,
}
| ImageExtractor |
python | huggingface__transformers | src/transformers/models/glm4v/modeling_glm4v.py | {
"start": 3668,
"end": 4573
} | class ____(nn.Module):
def __init__(self, config: Glm4vVisionConfig) -> None:
super().__init__()
self.patch_size = config.patch_size
self.temporal_patch_size = config.temporal_patch_size
self.in_channels = config.in_channels
self.embed_dim = config.hidden_size
kernel_size = [self.temporal_patch_size, self.patch_size, self.patch_size]
self.proj = nn.Conv3d(self.in_channels, self.embed_dim, kernel_size=kernel_size, stride=kernel_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
target_dtype = self.proj.weight.dtype
hidden_states = hidden_states.view(
-1, self.in_channels, self.temporal_patch_size, self.patch_size, self.patch_size
)
hidden_states = self.proj(hidden_states.to(dtype=target_dtype)).view(-1, self.embed_dim)
return hidden_states
| Glm4vVisionPatchEmbed |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.