language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | chroma-core__chroma | chromadb/errors.py | {
"start": 1574,
"end": 1709
} | class ____(ChromaError):
@classmethod
@overrides
def name(cls) -> str:
return "InvalidHTTPVersion"
| InvalidHTTPVersion |
python | google__jax | jax/_src/pallas/core.py | {
"start": 8297,
"end": 8675
} | class ____:
grid: GridMappingGrid
mapped_dims: tuple[int, ...]
def size(self, axis: int) -> int | DynamicGridDim:
valid_grid = tuple(self.grid)
try:
size = valid_grid[axis]
except IndexError as e:
raise ValueError(
f"Axis {axis} is out of bounds for grid {self.grid}"
) from e
return size
@dataclasses.dataclass
| PallasGridContext |
python | pydata__xarray | xarray/compat/pdcompat.py | {
"start": 2130,
"end": 3474
} | class ____(Enum):
"""Used by pandas to specify a default value for a deprecated argument.
Copied from pandas._libs.lib._NoDefault.
See also:
- pandas-dev/pandas#30788
- pandas-dev/pandas#40684
- pandas-dev/pandas#40715
- pandas-dev/pandas#47045
"""
no_default = "NO_DEFAULT"
def __repr__(self) -> str:
return "<no_default>"
no_default = (
_NoDefault.no_default
) # Sentinel indicating the default value following pandas
NoDefault = Literal[_NoDefault.no_default] # For typing following pandas
def timestamp_as_unit(date: pd.Timestamp, unit: PDDatetimeUnitOptions) -> pd.Timestamp:
"""Convert the underlying int64 representation to the given unit.
Compatibility function for pandas issue where "as_unit" is not defined
for pandas.Timestamp in pandas versions < 2.2. Can be removed minimum
pandas version is >= 2.2.
"""
if hasattr(date, "as_unit"):
date = date.as_unit(unit)
elif hasattr(date, "_as_unit"):
date = date._as_unit(unit)
return date
def default_precision_timestamp(*args, **kwargs) -> pd.Timestamp:
"""Return a Timestamp object with the default precision.
Xarray default is "ns".
"""
dt = pd.Timestamp(*args, **kwargs)
if dt.unit != "ns":
dt = timestamp_as_unit(dt, "ns")
return dt
| _NoDefault |
python | getsentry__sentry | tests/sentry/issues/endpoints/test_group_event_details.py | {
"start": 6887,
"end": 17214
} | class ____(
GroupEventDetailsEndpointTestBase, APITestCase, SnubaTestCase, OccurrenceTestMixin
):
def test_get_simple_helpful(self) -> None:
self.event_d = self.store_event(
data={
"event_id": "d" * 32,
"environment": "staging",
"timestamp": before_now(minutes=1).isoformat(),
"fingerprint": ["group-1"],
"contexts": {
"replay": {"replay_id": uuid.uuid4().hex},
"trace": {
"sampled": True,
"span_id": "babaae0d4b7512d9",
"trace_id": "a7d67cf796774551a95be6543cacd459",
},
},
"errors": [],
},
project_id=self.project_1.id,
)
url = f"/api/0/issues/{self.event_a.group.id}/events/recommended/"
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == str(self.event_d.event_id)
assert response.data["previousEventID"] == self.event_c.event_id
assert response.data["nextEventID"] is None
def test_get_helpful_event_id(self) -> None:
"""
When everything else is equal, the event_id should be used to break ties.
"""
timestamp = before_now(minutes=1).isoformat()
self.event_d = self.store_event(
data={
"event_id": "d" * 32,
"environment": "staging",
"timestamp": timestamp,
"fingerprint": ["group-1"],
"contexts": {},
"errors": [],
},
project_id=self.project_1.id,
)
self.event_e = self.store_event(
data={
"event_id": "e" * 32,
"environment": "staging",
"timestamp": timestamp,
"fingerprint": ["group-1"],
"contexts": {},
"errors": [],
},
project_id=self.project_1.id,
)
url = f"/api/0/issues/{self.event_a.group.id}/events/recommended/"
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == str(self.event_e.event_id)
assert response.data["previousEventID"] == self.event_d.event_id
assert response.data["nextEventID"] is None
def test_get_helpful_replay_id_order(self) -> None:
replay_id_1 = uuid.uuid4().hex
replay_id_2 = uuid.uuid4().hex
replay_id_1 = "b" + replay_id_1[1:]
replay_id_2 = "a" + replay_id_2[1:]
self.event_d = self.store_event(
data={
"event_id": "d" * 32,
"environment": "staging",
"timestamp": before_now(minutes=3).isoformat(),
"fingerprint": ["group-order"],
"contexts": {
"replay": {"replay_id": replay_id_1},
},
},
project_id=self.project_1.id,
)
self.event_e = self.store_event(
data={
"event_id": "e" * 32,
"environment": "staging",
"timestamp": before_now(minutes=2).isoformat(),
"fingerprint": ["group-order"],
"contexts": {
"replay": {"replay_id": replay_id_2},
},
},
project_id=self.project_1.id,
)
self.event_f = self.store_event(
data={
"event_id": "f" * 32,
"environment": "staging",
"timestamp": before_now(minutes=1).isoformat(),
"fingerprint": ["group-order"],
},
project_id=self.project_1.id,
)
url = f"/api/0/issues/{self.event_d.group.id}/events/recommended/"
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == str(self.event_e.event_id)
assert response.data["previousEventID"] == str(self.event_d.event_id)
assert response.data["nextEventID"] == str(self.event_f.event_id)
def test_with_empty_query(self) -> None:
url = f"/api/0/issues/{self.event_a.group.id}/events/recommended/"
response = self.client.get(url, {"query": ""}, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == str(self.event_c.event_id)
assert response.data["previousEventID"] == str(self.event_b.event_id)
assert response.data["nextEventID"] is None
def test_issue_filter_query_ignored(self) -> None:
url = f"/api/0/issues/{self.event_a.group.id}/events/recommended/"
response = self.client.get(url, {"query": "is:unresolved"}, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == str(self.event_c.event_id)
assert response.data["previousEventID"] == str(self.event_b.event_id)
assert response.data["nextEventID"] is None
def test_event_release_query(self) -> None:
url = f"/api/0/issues/{self.event_a.group.id}/events/recommended/"
response = self.client.get(url, {"query": f"release:{self.release_version}"}, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == str(self.event_c.event_id)
assert response.data["previousEventID"] == str(self.event_b.event_id)
assert response.data["nextEventID"] is None
def test_event_release_semver_query(self) -> None:
event_g = self.store_event(
data={
"event_id": "1" * 32,
"environment": "staging",
"timestamp": before_now(minutes=1).isoformat(),
"fingerprint": ["group-4"],
"release": "test@1.2.3",
},
project_id=self.project_1.id,
)
release = Release.objects.filter(version="test@1.2.3").get()
assert release.version == "test@1.2.3"
assert release.is_semver_release
url = f"/api/0/issues/{event_g.group.id}/events/recommended/"
response = self.client.get(url, {"query": f"{SEMVER_ALIAS}:1.2.3"}, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == str(event_g.event_id)
assert response.data["previousEventID"] is None
assert response.data["nextEventID"] is None
def test_has_environment(self) -> None:
url = f"/api/0/issues/{self.event_a.group.id}/events/recommended/"
response = self.client.get(url, {"query": "has:environment"}, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == str(self.event_c.event_id)
assert response.data["previousEventID"] == str(self.event_b.event_id)
assert response.data["nextEventID"] is None
def test_skipped_snuba_fields_ignored(self) -> None:
event_e = self.store_event(
data={
"event_id": "e" * 32,
"environment": "staging",
"timestamp": before_now(minutes=1).isoformat(),
"fingerprint": ["group-4"],
"contexts": {
"replay": {"replay_id": uuid.uuid4().hex},
"trace": {
"sampled": True,
"span_id": "babaae0d4b7512d9",
"trace_id": "a7d67cf796774551a95be6543cacd459",
},
},
"errors": [],
},
project_id=self.project_1.id,
)
event_f = self.store_event(
data={
"event_id": "f" * 32,
"environment": "staging",
"timestamp": before_now(minutes=1).isoformat(),
"fingerprint": ["group-4"],
},
project_id=self.project_1.id,
)
group = event_e.group
group.status = GroupStatus.RESOLVED
group.substatus = None
group.save(update_fields=["status", "substatus"])
url = f"/api/0/issues/{group.id}/events/recommended/"
response = self.client.get(url, {"query": "is:unresolved has:environment"}, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == str(event_e.event_id)
assert response.data["previousEventID"] is None
assert response.data["nextEventID"] == str(event_f.event_id)
def test_query_title(self) -> None:
title = "four score and seven years ago"
event_e = self.store_event(
data={
"event_id": "e" * 32,
"environment": "staging",
"timestamp": before_now(minutes=1).isoformat(),
"fingerprint": ["group-title"],
"message": title,
},
project_id=self.project_1.id,
)
url = f"/api/0/issues/{event_e.group.id}/events/recommended/"
response = self.client.get(url, {"query": f'title:"{title}"'}, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == str(event_e.event_id)
assert response.data["previousEventID"] is None
assert response.data["nextEventID"] is None
def test_query_issue_platform_title(self) -> None:
issue_title = "king of england"
occurrence, group_info = self.process_occurrence(
project_id=self.project.id,
issue_title=issue_title,
event_data={"level": "info"},
)
assert group_info is not None
url = f"/api/0/issues/{group_info.group.id}/events/recommended/"
response = self.client.get(url, {"query": f'title:"{issue_title}"'}, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == str(occurrence.event_id)
assert response.data["previousEventID"] is None
assert response.data["nextEventID"] is None
| GroupEventDetailsHelpfulEndpointTest |
python | pytorch__pytorch | test/test_fx_passes.py | {
"start": 24320,
"end": 24936
} | class ____:
# This test case is for pattern where no matching anchor is found in the target graph
# `anchor` is the starting point of the pattern matching, it's usually the boundary returning nodes
@staticmethod
def forward(x):
x = x + 1
return x
@staticmethod
def pattern(a):
b1 = a.relu()
return b1
test_cases = [
# match_output, match_placeholder, num_matches
TestCase(False, False, 0),
TestCase(True, False, 0),
TestCase(False, True, 0),
TestCase(True, True, 0)
]
@instantiate_parametrized_tests
| NoAnchorFound |
python | apache__airflow | devel-common/src/sphinx_exts/docs_build/spelling_checks.py | {
"start": 1338,
"end": 7345
} | class ____(NamedTuple):
"""Spelling errors found when building docs."""
file_path: Path | None
line_no: int | None
spelling: str | None
suggestion: str | None
context_line: str | None
message: str
def __eq__(self, other):
left = (
self.file_path,
self.line_no,
self.spelling,
self.context_line,
self.message,
)
right = (
other.file_path,
other.line_no,
other.spelling,
other.context_line,
other.message,
)
return left == right
def __hash__(self):
return hash((self.file_path, self.line_no, self.spelling, self.context_line, self.message))
def __ne__(self, other):
return not self == other
def __lt__(self, other):
file_path_a: Path = self.file_path or Path("/")
file_path_b: Path = other.file_path or Path("/")
line_no_a: int = self.line_no or 0
line_no_b: int = other.line_no or 0
context_line_a: str = self.context_line or ""
context_line_b: str = other.context_line or ""
left: tuple[Path, int, str, str, str] = (
file_path_a,
line_no_a,
context_line_a,
self.spelling or "",
self.message or "",
)
right: tuple[Path, int, str, str, str] = (
file_path_b,
line_no_b,
context_line_b,
other.spelling or "",
other.message or "",
)
return left < right
def parse_spelling_warnings(warning_text: str, docs_dir: Path) -> list[SpellingError]:
"""
Parses warnings from Sphinx.
:param warning_text: warning to parse
:param docs_dir: documentation directory
:return: list of SpellingError.
"""
sphinx_spelling_errors = []
for sphinx_warning in warning_text.splitlines():
if not sphinx_warning:
continue
warning_parts = None
match = re.search(r"(.*):(\w*):\s\((\w*)\)\s?(\w*)\s?(.*)", sphinx_warning)
if match:
warning_parts = match.groups()
if warning_parts and len(warning_parts) == 5:
try:
sphinx_spelling_errors.append(
SpellingError(
file_path=docs_dir / warning_parts[0],
line_no=int(warning_parts[1]) if warning_parts[1] not in ("None", "") else None,
spelling=warning_parts[2],
suggestion=warning_parts[3] if warning_parts[3] else None,
context_line=warning_parts[4],
message=sphinx_warning,
)
)
except Exception:
# If an exception occurred while parsing the warning message, display the raw warning message.
sphinx_spelling_errors.append(
SpellingError(
file_path=None,
line_no=None,
spelling=None,
suggestion=None,
context_line=None,
message=sphinx_warning,
)
)
else:
sphinx_spelling_errors.append(
SpellingError(
file_path=None,
line_no=None,
spelling=None,
suggestion=None,
context_line=None,
message=sphinx_warning,
)
)
return sphinx_spelling_errors
def display_spelling_error_summary(spelling_errors: dict[str, list[SpellingError]]) -> None:
"""Displays summary of Spelling errors"""
console.print()
console.print("[red]" + "#" * 30 + " Start spelling errors summary " + "#" * 30 + "[/]")
console.print()
for package_name, errors in sorted(spelling_errors.items()):
if package_name:
console.print("=" * 30, f" [bright_blue]{package_name}[/] ", "=" * 30)
else:
console.print("=" * 30, " [bright_blue]General[/] ", "=" * 30)
for warning_no, error in enumerate(sorted(errors), 1):
console.print("-" * 30, f"Error {warning_no:3}", "-" * 30)
_display_error(error)
console.print("=" * 100)
console.print()
msg = """[green]
If there are spelling errors related to class or function name, make sure
those names are quoted with backticks '`' - this should exclude it from spellcheck process.
If there are spelling errors in the summary above, and the spelling is
correct, add the spelling to docs/spelling_wordlist.txt or use the
spelling directive.
Check https://sphinxcontrib-spelling.readthedocs.io/en/latest/customize.html#private-dictionaries
for more details.
If there are no spelling errors in the summary above, there might be an
issue unrelated to spelling. Please review the traceback.
"""
console.print(msg)
console.print()
console.print
console.print("[red]" + "#" * 30 + " End docs build errors summary " + "#" * 30 + "[/]")
console.print
def _display_error(error: SpellingError):
console.print(error.message)
console.print()
if error.file_path:
console.print(f"File path: {error.file_path.resolve()}")
if error.spelling:
console.print(f"[red]Incorrect Spelling: '{error.spelling}'")
if error.suggestion:
console.print(f"Suggested Spelling: '{error.suggestion}'")
if error.context_line:
console.print(f"Line with Error: '{error.context_line}'")
if (
error.file_path
and not error.file_path.as_posix().endswith("<unknown>")
and error.line_no
and os.path.isfile(error.file_path)
):
console.print(f"Line Number: {error.line_no}")
console.print(prepare_code_snippet(error.file_path, error.line_no))
| SpellingError |
python | charliermarsh__ruff | crates/ty_python_semantic/resources/corpus/cyclic_reassignment.py | {
"start": 0,
"end": 67
} | class ____[_: foo](object): ...
[_] = (foo,) = foo
def foo(): ...
| foo |
python | spack__spack | lib/spack/spack/vendor/jinja2/nodes.py | {
"start": 33168,
"end": 33705
} | class ____(Stmt):
"""An overlay scope for extensions. This is a largely unoptimized scope
that however can be used to introduce completely arbitrary variables into
a sub scope from a dictionary or dictionary like object. The `context`
field has to evaluate to a dictionary object.
Example usage::
OverlayScope(context=self.call_method('get_context'),
body=[...])
.. versionadded:: 2.10
"""
fields = ("context", "body")
context: Expr
body: t.List[Node]
| OverlayScope |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/nocover/test_drypython_returns.py | {
"start": 2736,
"end": 2812
} | class ____(_FirstBase[A, str], _SecondBase[float, D]):
pass
| MixedGenerics2 |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/metadata.py | {
"start": 5683,
"end": 6746
} | class ____(graphene.ObjectType):
pool = graphene.NonNull(graphene.String)
class Meta:
interfaces = (GrapheneMetadataEntry,)
name = "PoolMetadataEntry"
def types():
return [
GrapheneMetadataEntry,
GrapheneTableColumnLineageMetadataEntry,
GrapheneTableSchemaMetadataEntry,
GrapheneTableMetadataEntry,
GrapheneFloatMetadataEntry,
GrapheneIntMetadataEntry,
GrapheneJsonMetadataEntry,
GrapheneBoolMetadataEntry,
GrapheneMarkdownMetadataEntry,
GrapheneMetadataItemDefinition,
GraphenePathMetadataEntry,
GrapheneNotebookMetadataEntry,
GraphenePythonArtifactMetadataEntry,
GrapheneTextMetadataEntry,
GrapheneUrlMetadataEntry,
GraphenePipelineRunMetadataEntry,
GrapheneAssetMetadataEntry,
GrapheneJobMetadataEntry,
GrapheneCodeReferencesMetadataEntry,
GrapheneNullMetadataEntry,
GrapheneTimestampMetadataEntry,
GraphenePoolMetadataEntry,
]
| GraphenePoolMetadataEntry |
python | paramiko__paramiko | paramiko/client.py | {
"start": 1462,
"end": 31919
} | class ____(ClosingContextManager):
"""
A high-level representation of a session with an SSH server. This class
wraps `.Transport`, `.Channel`, and `.SFTPClient` to take care of most
aspects of authenticating and opening channels. A typical use case is::
client = SSHClient()
client.load_system_host_keys()
client.connect('ssh.example.com')
stdin, stdout, stderr = client.exec_command('ls -l')
You may pass in explicit overrides for authentication and server host key
checking. The default mechanism is to try to use local key files or an
SSH agent (if one is running).
Instances of this class may be used as context managers.
.. versionadded:: 1.6
"""
def __init__(self):
"""
Create a new SSHClient.
"""
self._system_host_keys = HostKeys()
self._host_keys = HostKeys()
self._host_keys_filename = None
self._log_channel = None
self._policy = RejectPolicy()
self._transport = None
self._agent = None
def load_system_host_keys(self, filename=None):
"""
Load host keys from a system (read-only) file. Host keys read with
this method will not be saved back by `save_host_keys`.
This method can be called multiple times. Each new set of host keys
will be merged with the existing set (new replacing old if there are
conflicts).
If ``filename`` is left as ``None``, an attempt will be made to read
keys from the user's local "known hosts" file, as used by OpenSSH,
and no exception will be raised if the file can't be read. This is
probably only useful on posix.
:param str filename: the filename to read, or ``None``
:raises: ``IOError`` --
if a filename was provided and the file could not be read
"""
if filename is None:
# try the user's .ssh key file, and mask exceptions
filename = os.path.expanduser("~/.ssh/known_hosts")
try:
self._system_host_keys.load(filename)
except IOError:
pass
return
self._system_host_keys.load(filename)
def load_host_keys(self, filename):
"""
Load host keys from a local host-key file. Host keys read with this
method will be checked after keys loaded via `load_system_host_keys`,
but will be saved back by `save_host_keys` (so they can be modified).
The missing host key policy `.AutoAddPolicy` adds keys to this set and
saves them, when connecting to a previously-unknown server.
This method can be called multiple times. Each new set of host keys
will be merged with the existing set (new replacing old if there are
conflicts). When automatically saving, the last hostname is used.
:param str filename: the filename to read
:raises: ``IOError`` -- if the filename could not be read
"""
self._host_keys_filename = filename
self._host_keys.load(filename)
def save_host_keys(self, filename):
"""
Save the host keys back to a file. Only the host keys loaded with
`load_host_keys` (plus any added directly) will be saved -- not any
host keys loaded with `load_system_host_keys`.
:param str filename: the filename to save to
:raises: ``IOError`` -- if the file could not be written
"""
# update local host keys from file (in case other SSH clients
# have written to the known_hosts file meanwhile.
if self._host_keys_filename is not None:
self.load_host_keys(self._host_keys_filename)
with open(filename, "w") as f:
for hostname, keys in self._host_keys.items():
for keytype, key in keys.items():
f.write(
"{} {} {}\n".format(
hostname, keytype, key.get_base64()
)
)
def get_host_keys(self):
"""
Get the local `.HostKeys` object. This can be used to examine the
local host keys or change them.
:return: the local host keys as a `.HostKeys` object.
"""
return self._host_keys
def set_log_channel(self, name):
"""
Set the channel for logging. The default is ``"paramiko.transport"``
but it can be set to anything you want.
:param str name: new channel name for logging
"""
self._log_channel = name
def set_missing_host_key_policy(self, policy):
"""
Set policy to use when connecting to servers without a known host key.
Specifically:
* A **policy** is a "policy class" (or instance thereof), namely some
subclass of `.MissingHostKeyPolicy` such as `.RejectPolicy` (the
default), `.AutoAddPolicy`, `.WarningPolicy`, or a user-created
subclass.
* A host key is **known** when it appears in the client object's cached
host keys structures (those manipulated by `load_system_host_keys`
and/or `load_host_keys`).
:param .MissingHostKeyPolicy policy:
the policy to use when receiving a host key from a
previously-unknown server
"""
if inspect.isclass(policy):
policy = policy()
self._policy = policy
def _families_and_addresses(self, hostname, port):
"""
Yield pairs of address families and addresses to try for connecting.
:param str hostname: the server to connect to
:param int port: the server port to connect to
:returns: Yields an iterable of ``(family, address)`` tuples
"""
guess = True
addrinfos = socket.getaddrinfo(
hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM
)
for (family, socktype, proto, canonname, sockaddr) in addrinfos:
if socktype == socket.SOCK_STREAM:
yield family, sockaddr
guess = False
# some OS like AIX don't indicate SOCK_STREAM support, so just
# guess. :( We only do this if we did not get a single result marked
# as socktype == SOCK_STREAM.
if guess:
for family, _, _, _, sockaddr in addrinfos:
yield family, sockaddr
def connect(
self,
hostname,
port=SSH_PORT,
username=None,
password=None,
pkey=None,
key_filename=None,
timeout=None,
allow_agent=True,
look_for_keys=True,
compress=False,
sock=None,
gss_auth=False,
gss_kex=False,
gss_deleg_creds=True,
gss_host=None,
banner_timeout=None,
auth_timeout=None,
channel_timeout=None,
gss_trust_dns=True,
passphrase=None,
disabled_algorithms=None,
transport_factory=None,
auth_strategy=None,
):
"""
Connect to an SSH server and authenticate to it. The server's host key
is checked against the system host keys (see `load_system_host_keys`)
and any local host keys (`load_host_keys`). If the server's hostname
is not found in either set of host keys, the missing host key policy
is used (see `set_missing_host_key_policy`). The default policy is
to reject the key and raise an `.SSHException`.
Authentication is attempted in the following order of priority:
- The ``pkey`` or ``key_filename`` passed in (if any)
- ``key_filename`` may contain OpenSSH public certificate paths
as well as regular private-key paths; when files ending in
``-cert.pub`` are found, they are assumed to match a private
key, and both components will be loaded. (The private key
itself does *not* need to be listed in ``key_filename`` for
this to occur - *just* the certificate.)
- Any key we can find through an SSH agent
- Any ``id_*`` keys discoverable in ``~/.ssh/``
- When OpenSSH-style public certificates exist that match an
existing such private key (so e.g. one has ``id_rsa`` and
``id_rsa-cert.pub``) the certificate will be loaded alongside
the private key and used for authentication.
- Plain username/password auth, if a password was given
If a private key requires a password to unlock it, and a password is
passed in, that password will be used to attempt to unlock the key.
:param str hostname: the server to connect to
:param int port: the server port to connect to
:param str username:
the username to authenticate as (defaults to the current local
username)
:param str password:
Used for password authentication; is also used for private key
decryption if ``passphrase`` is not given.
:param str passphrase:
Used for decrypting private keys.
:param .PKey pkey: an optional private key to use for authentication
:param str key_filename:
the filename, or list of filenames, of optional private key(s)
and/or certs to try for authentication
:param float timeout:
an optional timeout (in seconds) for the TCP connect
:param bool allow_agent:
set to False to disable connecting to the SSH agent
:param bool look_for_keys:
set to False to disable searching for discoverable private key
files in ``~/.ssh/``
:param bool compress: set to True to turn on compression
:param socket sock:
an open socket or socket-like object (such as a `.Channel`) to use
for communication to the target host
:param bool gss_auth:
``True`` if you want to use GSS-API authentication
:param bool gss_kex:
Perform GSS-API Key Exchange and user authentication
:param bool gss_deleg_creds: Delegate GSS-API client credentials or not
:param str gss_host:
The targets name in the kerberos database. default: hostname
:param bool gss_trust_dns:
Indicates whether or not the DNS is trusted to securely
canonicalize the name of the host being connected to (default
``True``).
:param float banner_timeout: an optional timeout (in seconds) to wait
for the SSH banner to be presented.
:param float auth_timeout: an optional timeout (in seconds) to wait for
an authentication response.
:param float channel_timeout: an optional timeout (in seconds) to wait
for a channel open response.
:param dict disabled_algorithms:
an optional dict passed directly to `.Transport` and its keyword
argument of the same name.
:param transport_factory:
an optional callable which is handed a subset of the constructor
arguments (primarily those related to the socket, GSS
functionality, and algorithm selection) and generates a
`.Transport` instance to be used by this client. Defaults to
`.Transport.__init__`.
:param auth_strategy:
an optional instance of `.AuthStrategy`, triggering use of this
newer authentication mechanism instead of SSHClient's legacy auth
method.
.. warning::
This parameter is **incompatible** with all other
authentication-related parameters (such as, but not limited to,
``password``, ``key_filename`` and ``allow_agent``) and will
trigger an exception if given alongside them.
:returns:
`.AuthResult` if ``auth_strategy`` is non-``None``; otherwise,
returns ``None``.
:raises BadHostKeyException:
if the server's host key could not be verified.
:raises AuthenticationException:
if authentication failed.
:raises UnableToAuthenticate:
if authentication failed (when ``auth_strategy`` is non-``None``;
and note that this is a subclass of ``AuthenticationException``).
:raises socket.error:
if a socket error (other than connection-refused or
host-unreachable) occurred while connecting.
:raises NoValidConnectionsError:
if all valid connection targets for the requested hostname (eg IPv4
and IPv6) yielded connection-refused or host-unreachable socket
errors.
:raises SSHException:
if there was any other error connecting or establishing an SSH
session.
.. versionchanged:: 1.15
Added the ``banner_timeout``, ``gss_auth``, ``gss_kex``,
``gss_deleg_creds`` and ``gss_host`` arguments.
.. versionchanged:: 2.3
Added the ``gss_trust_dns`` argument.
.. versionchanged:: 2.4
Added the ``passphrase`` argument.
.. versionchanged:: 2.6
Added the ``disabled_algorithms`` argument.
.. versionchanged:: 2.12
Added the ``transport_factory`` argument.
.. versionchanged:: 3.2
Added the ``auth_strategy`` argument.
"""
if not sock:
errors = {}
# Try multiple possible address families (e.g. IPv4 vs IPv6)
to_try = list(self._families_and_addresses(hostname, port))
for af, addr in to_try:
try:
sock = socket.socket(af, socket.SOCK_STREAM)
if timeout is not None:
try:
sock.settimeout(timeout)
except:
pass
sock.connect(addr)
# Break out of the loop on success
break
except socket.error as e:
# As mentioned in socket docs it is better
# to close sockets explicitly
if sock:
sock.close()
# Raise anything that isn't a straight up connection error
# (such as a resolution error)
if e.errno not in (ECONNREFUSED, EHOSTUNREACH):
raise
# Capture anything else so we know how the run looks once
# iteration is complete. Retain info about which attempt
# this was.
errors[addr] = e
# Make sure we explode usefully if no address family attempts
# succeeded. We've no way of knowing which error is the "right"
# one, so we construct a hybrid exception containing all the real
# ones, of a subclass that client code should still be watching for
# (socket.error)
if len(errors) == len(to_try):
raise NoValidConnectionsError(errors)
if transport_factory is None:
transport_factory = Transport
t = self._transport = transport_factory(
sock,
gss_kex=gss_kex,
gss_deleg_creds=gss_deleg_creds,
disabled_algorithms=disabled_algorithms,
)
t.use_compression(compress=compress)
t.set_gss_host(
# t.hostname may be None, but GSS-API requires a target name.
# Therefore use hostname as fallback.
gss_host=gss_host or hostname,
trust_dns=gss_trust_dns,
gssapi_requested=gss_auth or gss_kex,
)
if self._log_channel is not None:
t.set_log_channel(self._log_channel)
if banner_timeout is not None:
t.banner_timeout = banner_timeout
if auth_timeout is not None:
t.auth_timeout = auth_timeout
if channel_timeout is not None:
t.channel_timeout = channel_timeout
if port == SSH_PORT:
server_hostkey_name = hostname
else:
server_hostkey_name = "[{}]:{}".format(hostname, port)
our_server_keys = None
our_server_keys = self._system_host_keys.get(server_hostkey_name)
if our_server_keys is None:
our_server_keys = self._host_keys.get(server_hostkey_name)
if our_server_keys is not None:
keytype = our_server_keys.keys()[0]
sec_opts = t.get_security_options()
other_types = [x for x in sec_opts.key_types if x != keytype]
sec_opts.key_types = [keytype] + other_types
t.start_client(timeout=timeout)
# If GSS-API Key Exchange is performed we are not required to check the
# host key, because the host is authenticated via GSS-API / SSPI as
# well as our client.
if not self._transport.gss_kex_used:
server_key = t.get_remote_server_key()
if our_server_keys is None:
# will raise exception if the key is rejected
self._policy.missing_host_key(
self, server_hostkey_name, server_key
)
else:
our_key = our_server_keys.get(server_key.get_name())
if our_key != server_key:
if our_key is None:
our_key = list(our_server_keys.values())[0]
raise BadHostKeyException(hostname, server_key, our_key)
if username is None:
username = getpass.getuser()
# New auth flow!
if auth_strategy is not None:
return auth_strategy.authenticate(transport=t)
# Old auth flow!
if key_filename is None:
key_filenames = []
elif isinstance(key_filename, str):
key_filenames = [key_filename]
else:
key_filenames = key_filename
self._auth(
username,
password,
pkey,
key_filenames,
allow_agent,
look_for_keys,
gss_auth,
gss_kex,
gss_deleg_creds,
t.gss_host,
passphrase,
)
def close(self):
"""
Close this SSHClient and its underlying `.Transport`.
This should be called anytime you are done using the client object.
.. warning::
Paramiko registers garbage collection hooks that will try to
automatically close connections for you, but this is not presently
reliable. Failure to explicitly close your client after use may
lead to end-of-process hangs!
"""
if self._transport is None:
return
self._transport.close()
self._transport = None
if self._agent is not None:
self._agent.close()
self._agent = None
def exec_command(
self,
command,
bufsize=-1,
timeout=None,
get_pty=False,
environment=None,
):
"""
Execute a command on the SSH server. A new `.Channel` is opened and
the requested command is executed. The command's input and output
streams are returned as Python ``file``-like objects representing
stdin, stdout, and stderr.
:param str command: the command to execute
:param int bufsize:
interpreted the same way as by the built-in ``file()`` function in
Python
:param int timeout:
set command's channel timeout. See `.Channel.settimeout`
:param bool get_pty:
Request a pseudo-terminal from the server (default ``False``).
See `.Channel.get_pty`
:param dict environment:
a dict of shell environment variables, to be merged into the
default environment that the remote command executes within.
.. warning::
Servers may silently reject some environment variables; see the
warning in `.Channel.set_environment_variable` for details.
:return:
the stdin, stdout, and stderr of the executing command, as a
3-tuple
:raises: `.SSHException` -- if the server fails to execute the command
.. versionchanged:: 1.10
Added the ``get_pty`` kwarg.
"""
chan = self._transport.open_session(timeout=timeout)
if get_pty:
chan.get_pty()
chan.settimeout(timeout)
if environment:
chan.update_environment(environment)
chan.exec_command(command)
stdin = chan.makefile_stdin("wb", bufsize)
stdout = chan.makefile("r", bufsize)
stderr = chan.makefile_stderr("r", bufsize)
return stdin, stdout, stderr
def invoke_shell(
self,
term="vt100",
width=80,
height=24,
width_pixels=0,
height_pixels=0,
environment=None,
):
"""
Start an interactive shell session on the SSH server. A new `.Channel`
is opened and connected to a pseudo-terminal using the requested
terminal type and size.
:param str term:
the terminal type to emulate (for example, ``"vt100"``)
:param int width: the width (in characters) of the terminal window
:param int height: the height (in characters) of the terminal window
:param int width_pixels: the width (in pixels) of the terminal window
:param int height_pixels: the height (in pixels) of the terminal window
:param dict environment: the command's environment
:return: a new `.Channel` connected to the remote shell
:raises: `.SSHException` -- if the server fails to invoke a shell
"""
chan = self._transport.open_session()
chan.get_pty(term, width, height, width_pixels, height_pixels)
chan.invoke_shell()
return chan
def open_sftp(self):
"""
Open an SFTP session on the SSH server.
:return: a new `.SFTPClient` session object
"""
return self._transport.open_sftp_client()
    def get_transport(self):
        """
        Return the underlying `.Transport` object for this SSH connection.

        This can be used to perform lower-level tasks, like opening specific
        kinds of channels.

        :return: the `.Transport` for this connection
        """
        # May be None if connect() has not been called or close() was called.
        return self._transport
def _key_from_filepath(self, filename, klass, password):
"""
Attempt to derive a `.PKey` from given string path ``filename``:
- If ``filename`` appears to be a cert, the matching private key is
loaded.
- Otherwise, the filename is assumed to be a private key, and the
matching public cert will be loaded if it exists.
"""
cert_suffix = "-cert.pub"
# Assume privkey, not cert, by default
if filename.endswith(cert_suffix):
key_path = filename[: -len(cert_suffix)]
cert_path = filename
else:
key_path = filename
cert_path = filename + cert_suffix
# Blindly try the key path; if no private key, nothing will work.
key = klass.from_private_key_file(key_path, password)
# TODO: change this to 'Loading' instead of 'Trying' sometime; probably
# when #387 is released, since this is a critical log message users are
# likely testing/filtering for (bah.)
msg = "Trying discovered key {} in {}".format(
hexlify(key.get_fingerprint()), key_path
)
self._log(DEBUG, msg)
# Attempt to load cert if it exists.
if os.path.isfile(cert_path):
key.load_certificate(cert_path)
self._log(DEBUG, "Adding public certificate {}".format(cert_path))
return key
    def _auth(
        self,
        username,
        password,
        pkey,
        key_filenames,
        allow_agent,
        look_for_keys,
        gss_auth,
        gss_kex,
        gss_deleg_creds,
        gss_host,
        passphrase,
    ):
        """
        Try, in order:

        - The key(s) passed in, if one was passed in.
        - Any key we can find through an SSH agent (if allowed).
        - Any id_* key discoverable in ~/.ssh/ (if allowed).
        - Plain username/password auth, if a password was given.

        (The password might be needed to unlock a private key [if 'passphrase'
        isn't also given], or for two-factor authentication [for which it is
        required].)
        """
        # Most recent failure across all attempts; re-raised at the end if
        # nothing succeeds, so the caller sees the last relevant error.
        saved_exception = None
        # Becomes truthy once a successful publickey auth reports that a
        # second factor (password / keyboard-interactive) is still required.
        two_factor = False
        allowed_types = set()
        two_factor_types = {"keyboard-interactive", "password"}
        # With no explicit passphrase, fall back to using the password to
        # unlock private keys.
        if passphrase is None and password is not None:
            passphrase = password
        # If GSS-API support and GSS-PI Key Exchange was performed, we attempt
        # authentication with gssapi-keyex.
        if gss_kex and self._transport.gss_kex_used:
            try:
                self._transport.auth_gssapi_keyex(username)
                return
            except Exception as e:
                saved_exception = e
        # Try GSS-API authentication (gssapi-with-mic) only if GSS-API Key
        # Exchange is not performed, because if we use GSS-API for the key
        # exchange, there is already a fully established GSS-API context, so
        # why should we do that again?
        if gss_auth:
            try:
                return self._transport.auth_gssapi_with_mic(
                    username, gss_host, gss_deleg_creds
                )
            except Exception as e:
                saved_exception = e
        # Attempt 1: an explicitly supplied key object.
        if pkey is not None:
            try:
                self._log(
                    DEBUG,
                    "Trying SSH key {}".format(
                        hexlify(pkey.get_fingerprint())
                    ),
                )
                allowed_types = set(
                    self._transport.auth_publickey(username, pkey)
                )
                two_factor = allowed_types & two_factor_types
                if not two_factor:
                    return
            except SSHException as e:
                saved_exception = e
        # Attempt 2: explicitly supplied key files, trying each supported
        # key class per file.
        if not two_factor:
            for key_filename in key_filenames:
                # TODO 4.0: leverage PKey.from_path() if we don't end up just
                # killing SSHClient entirely
                for pkey_class in (RSAKey, ECDSAKey, Ed25519Key):
                    try:
                        key = self._key_from_filepath(
                            key_filename, pkey_class, passphrase
                        )
                        allowed_types = set(
                            self._transport.auth_publickey(username, key)
                        )
                        two_factor = allowed_types & two_factor_types
                        if not two_factor:
                            return
                        # Key accepted but a second factor is pending; stop
                        # trying other key classes for this file.
                        break
                    except SSHException as e:
                        saved_exception = e
        # Attempt 3: keys offered by a running SSH agent.
        if not two_factor and allow_agent:
            if self._agent is None:
                self._agent = Agent()
            for key in self._agent.get_keys():
                try:
                    id_ = hexlify(key.get_fingerprint())
                    self._log(DEBUG, "Trying SSH agent key {}".format(id_))
                    # for 2-factor auth a successfully auth'd key password
                    # will return an allowed 2fac auth method
                    allowed_types = set(
                        self._transport.auth_publickey(username, key)
                    )
                    two_factor = allowed_types & two_factor_types
                    if not two_factor:
                        return
                    break
                except SSHException as e:
                    saved_exception = e
        # Attempt 4: conventionally named keys discovered under ~/.ssh
        # (or ~/ssh on Windows).
        if not two_factor:
            keyfiles = []
            for keytype, name in [
                (RSAKey, "rsa"),
                (ECDSAKey, "ecdsa"),
                (Ed25519Key, "ed25519"),
            ]:
                # ~/ssh/ is for windows
                for directory in [".ssh", "ssh"]:
                    full_path = os.path.expanduser(
                        "~/{}/id_{}".format(directory, name)
                    )
                    if os.path.isfile(full_path):
                        # TODO: only do this append if below did not run
                        keyfiles.append((keytype, full_path))
                    if os.path.isfile(full_path + "-cert.pub"):
                        keyfiles.append((keytype, full_path + "-cert.pub"))
            # Discovery always runs above; the flag is honored by discarding
            # the results here.
            if not look_for_keys:
                keyfiles = []
            for pkey_class, filename in keyfiles:
                try:
                    key = self._key_from_filepath(
                        filename, pkey_class, passphrase
                    )
                    # for 2-factor auth a successfully auth'd key will result
                    # in ['password']
                    allowed_types = set(
                        self._transport.auth_publickey(username, key)
                    )
                    two_factor = allowed_types & two_factor_types
                    if not two_factor:
                        return
                    break
                except (SSHException, IOError) as e:
                    saved_exception = e
        # Attempt 5: plain password auth -- which also serves as the second
        # factor when a publickey attempt above left two_factor pending.
        if password is not None:
            try:
                self._transport.auth_password(username, password)
                return
            except SSHException as e:
                saved_exception = e
        elif two_factor:
            # No password available: fall back to keyboard-interactive for
            # the pending second factor.
            try:
                self._transport.auth_interactive_dumb(username)
                return
            except SSHException as e:
                saved_exception = e
        # if we got an auth-failed exception earlier, re-raise it
        if saved_exception is not None:
            raise saved_exception
        raise SSHException("No authentication methods available")
def _log(self, level, msg):
self._transport._log(level, msg)
| SSHClient |
python | getsentry__sentry | src/sentry/api/serializers/release_details_types.py | {
"start": 1870,
"end": 1917
} | class ____(BaseProject):
newGroups: int
| Project |
python | ray-project__ray | python/ray/tests/gpu_objects/test_gpu_objects_nixl.py | {
"start": 186,
"end": 8366
} | class ____:
def __init__(self):
self.reserved_tensor1 = torch.tensor([1, 2, 3]).to("cuda")
self.reserved_tensor2 = torch.tensor([4, 5, 6]).to("cuda")
self.reserved_tensor3 = torch.tensor([7, 8, 9]).to("cuda")
@ray.method(tensor_transport="nixl")
def echo(self, data, device):
return data.to(device)
def sum(self, data, device):
assert data.device.type == device
return data.sum().item()
def produce(self, tensors):
refs = []
for t in tensors:
refs.append(ray.put(t, _tensor_transport="nixl"))
return refs
def consume_with_nixl(self, refs):
tensors = [ray.get(ref) for ref in refs]
sum = 0
for t in tensors:
assert t.device.type == "cuda"
sum += t.sum().item()
return sum
def consume_with_object_store(self, refs):
tensors = [ray.get(ref, _tensor_transport="object_store") for ref in refs]
sum = 0
for t in tensors:
assert t.device.type == "cuda"
sum += t.sum().item()
return sum
def gc(self):
tensor = torch.tensor([1, 2, 3]).to("cuda")
ref = ray.put(tensor, _tensor_transport="nixl")
obj_id = ref.hex()
gpu_manager = ray._private.worker.global_worker.gpu_object_manager
assert gpu_manager.gpu_object_store.has_tensor(tensor)
assert obj_id in gpu_manager.managed_gpu_object_metadata
nixl_meta = gpu_manager.gpu_object_store._managed_meta_nixl[obj_id]
assert nixl_meta is not None
assert gpu_manager.gpu_object_store._managed_meta_counts_nixl[nixl_meta] == 1
del ref
gpu_manager.gpu_object_store.wait_tensor_freed(tensor, timeout=10)
assert not gpu_manager.gpu_object_store.has_tensor(tensor)
assert obj_id not in gpu_manager.managed_gpu_object_metadata
assert obj_id not in gpu_manager.gpu_object_store._managed_meta_nixl
assert nixl_meta not in gpu_manager.gpu_object_store._managed_meta_counts_nixl
return "Success"
@ray.method(tensor_transport="nixl")
def send_dict1(self):
return {"round1-1": self.reserved_tensor1, "round1-2": self.reserved_tensor2}
@ray.method(tensor_transport="nixl")
def send_dict2(self):
return {"round2-1": self.reserved_tensor1, "round2-3": self.reserved_tensor3}
def sum_dict(self, dict):
return sum(v.sum().item() for v in dict.values())
def get_num_gpu_objects(self):
gpu_object_manager = ray._private.worker.global_worker.gpu_object_manager
return gpu_object_manager.gpu_object_store.get_num_objects()
def get_num_managed_meta_nixl(self):
gpu_object_manager = ray._private.worker.global_worker.gpu_object_manager
return gpu_object_manager.gpu_object_store.get_num_managed_meta_nixl()
@ray.method(concurrency_group="_ray_system")
def block_background_thread(self, signal_actor):
ray.get(signal_actor.wait.remote())
@pytest.mark.parametrize("ray_start_regular", [{"num_gpus": 1}], indirect=True)
def test_ray_get_gpu_ref_created_by_actor_task(ray_start_regular):
actor = GPUTestActor.remote()
tensor = torch.tensor([1, 2, 3]).to("cuda")
ref1 = actor.echo.remote(tensor, "cuda")
ref2 = actor.echo.remote(tensor, "cuda")
ref3 = actor.echo.remote(tensor, "cuda")
# Test ray.get with default tensor transport, should use nixl here.
# TODO: Verify it's using the correct tensor transport.
assert torch.equal(ray.get(ref1), tensor)
# # Test ray.get with nixl tensor transport
assert torch.equal(ray.get(ref2, _tensor_transport="nixl"), tensor)
# # Test ray.get with object store tensor transport
assert torch.equal(ray.get(ref3, _tensor_transport="object_store"), tensor)
@pytest.mark.parametrize("ray_start_regular", [{"num_gpus": 2}], indirect=True)
def test_p2p(ray_start_regular):
num_actors = 2
actors = [GPUTestActor.remote() for _ in range(num_actors)]
src_actor, dst_actor = actors[0], actors[1]
# Create test tensor
tensor = torch.tensor([1, 2, 3])
tensor1 = torch.tensor([4, 5, 6])
# Test GPU to GPU transfer
ref = src_actor.echo.remote(tensor, "cuda")
# Trigger tensor transfer from src to dst actor
result = dst_actor.sum.remote(ref, "cuda")
assert tensor.sum().item() == ray.get(result)
# Test CPU to CPU transfer
ref1 = src_actor.echo.remote(tensor1, "cpu")
result1 = dst_actor.sum.remote(ref1, "cpu")
assert tensor1.sum().item() == ray.get(result1)
@pytest.mark.parametrize("ray_start_regular", [{"num_gpus": 1}], indirect=True)
def test_intra_gpu_tensor_transfer(ray_start_regular):
actor = GPUTestActor.remote()
tensor = torch.tensor([1, 2, 3])
# Intra-actor communication for pure GPU tensors
ref = actor.echo.remote(tensor, "cuda")
result = actor.sum.remote(ref, "cuda")
assert tensor.sum().item() == ray.get(result)
@pytest.mark.parametrize("ray_start_regular", [{"num_gpus": 2}], indirect=True)
def test_put_and_get_object_with_nixl(ray_start_regular):
actors = [GPUTestActor.remote() for _ in range(2)]
src_actor, dst_actor = actors[0], actors[1]
tensor1 = torch.tensor([1, 2, 3]).to("cuda")
tensor2 = torch.tensor([4, 5, 6, 0]).to("cuda")
tensor3 = torch.tensor([7, 8, 9, 0, 0]).to("cuda")
tensors = [tensor1, tensor2, tensor3]
ref = src_actor.produce.remote(tensors)
ref1 = dst_actor.consume_with_nixl.remote(ref)
result1 = ray.get(ref1)
assert result1 == 45
@pytest.mark.parametrize("ray_start_regular", [{"num_gpus": 2}], indirect=True)
def test_put_and_get_object_with_object_store(ray_start_regular):
actors = [GPUTestActor.remote() for _ in range(2)]
src_actor, dst_actor = actors[0], actors[1]
tensor1 = torch.tensor([1, 2, 3]).to("cuda")
tensor2 = torch.tensor([4, 5, 6, 0]).to("cuda")
tensor3 = torch.tensor([7, 8, 9, 0, 0]).to("cuda")
tensors = [tensor1, tensor2, tensor3]
ref = src_actor.produce.remote(tensors)
ref1 = dst_actor.consume_with_object_store.remote(ref)
result1 = ray.get(ref1)
assert result1 == 45
@pytest.mark.parametrize("ray_start_regular", [{"num_gpus": 1}], indirect=True)
def test_put_gc(ray_start_regular):
actor = GPUTestActor.remote()
ref = actor.gc.remote()
assert ray.get(ref) == "Success"
@pytest.mark.parametrize("ray_start_regular", [{"num_gpus": 2}], indirect=True)
def test_send_duplicate_tensor(ray_start_regular):
actors = [GPUTestActor.remote() for _ in range(2)]
src_actor, dst_actor = actors[0], actors[1]
ref1 = src_actor.send_dict1.remote()
result1 = dst_actor.sum_dict.remote(ref1)
assert ray.get(result1) == 21
ref2 = src_actor.send_dict1.remote()
result2 = dst_actor.sum_dict.remote(ref2)
assert ray.get(result2) == 21
del ref1
del ref2
wait_for_condition(
lambda: ray.get(src_actor.get_num_gpu_objects.remote()) == 0,
timeout=10,
retry_interval_ms=100,
)
wait_for_condition(
lambda: ray.get(src_actor.get_num_managed_meta_nixl.remote()) == 0,
timeout=10,
retry_interval_ms=100,
)
@pytest.mark.parametrize("ray_start_regular", [{"num_gpus": 2}], indirect=True)
def test_nixl_abort(ray_start_regular):
actors = [GPUTestActor.remote() for _ in range(2)]
# Trigger transfer and kill sender before the receiver starts receiving
signal_actor = SignalActor.remote()
actors[1].block_background_thread.remote(signal_actor)
ref = actors[0].echo.remote(torch.randn((100, 100)), "cuda")
result = actors[1].sum.remote(ref, "cuda")
ray.kill(actors[0])
signal_actor.send.remote()
with pytest.raises(ray.exceptions.RayTaskError) as excinfo:
ray.get(result)
assert "ActorDiedError" in str(excinfo.value)
# Try a transfer with actor[1] receiving again
new_actor = GPUTestActor.remote()
ref = new_actor.echo.remote(torch.tensor([4, 5, 6]), "cuda")
result = actors[1].sum.remote(ref, "cuda")
assert ray.get(result) == 15
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| GPUTestActor |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-gitlab/unit_tests/test_config_migrations.py | {
"start": 2034,
"end": 3349
} | class ____:
def test_given_valid_api_url_then_no_exception_is_raised(self, oauth_config):
config = dict(oauth_config)
config["api_url"] = "https://gitlab.com"
source = get_source(config=config, config_path=None)
migrated_config = source.configure(config=config, temp_dir="/not/a/real/path")
source.streams(migrated_config)
@pytest.mark.parametrize(
"invalid_api_url, expected_error",
[
("http://badscheme.com", "Http scheme is not allowed in this environment. Please use `https` instead."),
("not a valid url", "Invalid API resource locator."),
("gitlab.com/api/v2/resource", "Invalid API resource locator."),
],
ids=["bad-scheme", "invalid-url-string", "invalid-url-format"],
)
def test_given_invalid_api_url_then_exception_is_raised(self, mock_is_cloud_environment, oauth_config, invalid_api_url, expected_error):
config = dict(oauth_config)
config["api_url"] = invalid_api_url
source = get_source(config=config, config_path=None)
migrated_config = source.configure(config=config, temp_dir="/not/a/real/path")
with pytest.raises(ValueError) as e:
source.streams(migrated_config)
assert str(e.value) == expected_error
| TestValidations |
python | getsentry__sentry | src/sentry/spans/grouping/strategy/base.py | {
"start": 323,
"end": 1158
} | class ____(TypedDict):
trace_id: str
parent_span_id: str
span_id: str
is_segment: NotRequired[bool]
start_timestamp: float
timestamp: float
same_process_as_parent: bool
op: str
description: str | None
fingerprint: Sequence[str] | None
tags: Any | None
data: Any | None
sentry_tags: NotRequired[dict[str, str]]
hash: NotRequired[str]
# A callable strategy is a callable that when given a span, it tries to
# returns a fingerprint. If the strategy does not apply to the span, it
# should return `None` to indicate that the strategy should not be used
# and to try a different strategy. If the strategy does apply, it should
# return a list of strings that will serve as the span fingerprint.
CallableStrategy = Callable[[Span], Optional[Sequence[str]]]
@dataclass(frozen=True)
| Span |
python | walkccc__LeetCode | solutions/1721. Swapping Nodes in a Linked List/1721.py | {
"start": 0,
"end": 397
} | class ____:
def swapNodes(self, head: ListNode | None, k: int) -> ListNode | None:
p = None # Points the k-th node from the beginning.
q = None # Points the k-th node from the end.
curr = head
while curr:
if q:
q = q.next
k -= 1
if k == 0:
p = curr
q = head
curr = curr.next
p.val, q.val = q.val, p.val
return head
| Solution |
python | modin-project__modin | asv_bench/benchmarks/benchmarks.py | {
"start": 15465,
"end": 15972
} | class ____:
param_names = ["shape", "head_count"]
params = [
get_benchmark_shapes("TimeHead"),
[5, 0.8],
]
def setup(self, shape, head_count):
self.df = generate_dataframe("int", *shape, RAND_LOW, RAND_HIGH)
self.head_count = (
int(head_count * len(self.df.index))
if isinstance(head_count, float)
else head_count
)
def time_head(self, shape, head_count):
execute(self.df.head(self.head_count))
| TimeHead |
python | langchain-ai__langchain | libs/core/langchain_core/load/serializable.py | {
"start": 2203,
"end": 11683
} | class ____(BaseModel, ABC):
"""Serializable base class.
This class is used to serialize objects to JSON.
It relies on the following methods and properties:
- `is_lc_serializable`: Is this class serializable?
By design, even if a class inherits from `Serializable`, it is not serializable
by default. This is to prevent accidental serialization of objects that should
not be serialized.
- `get_lc_namespace`: Get the namespace of the LangChain object.
During deserialization, this namespace is used to identify
the correct class to instantiate.
Please see the `Reviver` class in `langchain_core.load.load` for more details.
During deserialization an additional mapping is handle classes that have moved
or been renamed across package versions.
- `lc_secrets`: A map of constructor argument names to secret ids.
- `lc_attributes`: List of additional attribute names that should be included
as part of the serialized representation.
"""
# Remove default BaseModel init docstring.
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""""" # noqa: D419 # Intentional blank docstring
super().__init__(*args, **kwargs)
@classmethod
def is_lc_serializable(cls) -> bool:
"""Is this class serializable?
By design, even if a class inherits from `Serializable`, it is not serializable
by default. This is to prevent accidental serialization of objects that should
not be serialized.
Returns:
Whether the class is serializable. Default is `False`.
"""
return False
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
For example, if the class is `langchain.llms.openai.OpenAI`, then the
namespace is `["langchain", "llms", "openai"]`
Returns:
The namespace.
"""
return cls.__module__.split(".")
@property
def lc_secrets(self) -> dict[str, str]:
"""A map of constructor argument names to secret ids.
For example, `{"openai_api_key": "OPENAI_API_KEY"}`
"""
return {}
@property
def lc_attributes(self) -> dict:
"""List of attribute names that should be included in the serialized kwargs.
These attributes must be accepted by the constructor.
Default is an empty dictionary.
"""
return {}
@classmethod
def lc_id(cls) -> list[str]:
"""Return a unique identifier for this class for serialization purposes.
The unique identifier is a list of strings that describes the path
to the object.
For example, for the class `langchain.llms.openai.OpenAI`, the id is
`["langchain", "llms", "openai", "OpenAI"]`.
"""
# Pydantic generics change the class name. So we need to do the following
if (
"origin" in cls.__pydantic_generic_metadata__
and cls.__pydantic_generic_metadata__["origin"] is not None
):
original_name = cls.__pydantic_generic_metadata__["origin"].__name__
else:
original_name = cls.__name__
return [*cls.get_lc_namespace(), original_name]
model_config = ConfigDict(
extra="ignore",
)
@override
def __repr_args__(self) -> Any:
return [
(k, v)
for k, v in super().__repr_args__()
if (k not in type(self).model_fields or try_neq_default(v, k, self))
]
def to_json(self) -> SerializedConstructor | SerializedNotImplemented:
"""Serialize the object to JSON.
Raises:
ValueError: If the class has deprecated attributes.
Returns:
A JSON serializable object or a `SerializedNotImplemented` object.
"""
if not self.is_lc_serializable():
return self.to_json_not_implemented()
model_fields = type(self).model_fields
secrets = {}
# Get latest values for kwargs if there is an attribute with same name
lc_kwargs = {}
for k, v in self:
if not _is_field_useful(self, k, v):
continue
# Do nothing if the field is excluded
if k in model_fields and model_fields[k].exclude:
continue
lc_kwargs[k] = getattr(self, k, v)
# Merge the lc_secrets and lc_attributes from every class in the MRO
for cls in [None, *self.__class__.mro()]:
# Once we get to Serializable, we're done
if cls is Serializable:
break
if cls:
deprecated_attributes = [
"lc_namespace",
"lc_serializable",
]
for attr in deprecated_attributes:
if hasattr(cls, attr):
msg = (
f"Class {self.__class__} has a deprecated "
f"attribute {attr}. Please use the corresponding "
f"classmethod instead."
)
raise ValueError(msg)
# Get a reference to self bound to each class in the MRO
this = cast("Serializable", self if cls is None else super(cls, self))
secrets.update(this.lc_secrets)
# Now also add the aliases for the secrets
# This ensures known secret aliases are hidden.
# Note: this does NOT hide any other extra kwargs
# that are not present in the fields.
for key in list(secrets):
value = secrets[key]
if (key in model_fields) and (
alias := model_fields[key].alias
) is not None:
secrets[alias] = value
lc_kwargs.update(this.lc_attributes)
# include all secrets, even if not specified in kwargs
# as these secrets may be passed as an environment variable instead
for key in secrets:
secret_value = getattr(self, key, None) or lc_kwargs.get(key)
if secret_value is not None:
lc_kwargs.update({key: secret_value})
return {
"lc": 1,
"type": "constructor",
"id": self.lc_id(),
"kwargs": lc_kwargs
if not secrets
else _replace_secrets(lc_kwargs, secrets),
}
def to_json_not_implemented(self) -> SerializedNotImplemented:
"""Serialize a "not implemented" object.
Returns:
`SerializedNotImplemented`.
"""
return to_json_not_implemented(self)
def _is_field_useful(inst: Serializable, key: str, value: Any) -> bool:
"""Check if a field is useful as a constructor argument.
Args:
inst: The instance.
key: The key.
value: The value.
Returns:
Whether the field is useful. If the field is required, it is useful.
If the field is not required, it is useful if the value is not `None`.
If the field is not required and the value is `None`, it is useful if the
default value is different from the value.
"""
field = type(inst).model_fields.get(key)
if not field:
return False
if field.is_required():
return True
# Handle edge case: a value cannot be converted to a boolean (e.g. a
# Pandas DataFrame).
try:
value_is_truthy = bool(value)
except Exception as _:
value_is_truthy = False
if value_is_truthy:
return True
# Value is still falsy here!
if field.default_factory is dict and isinstance(value, dict):
return False
# Value is still falsy here!
if field.default_factory is list and isinstance(value, list):
return False
value_neq_default = _try_neq_default(value, field)
# If value is falsy and does not match the default
return value_is_truthy or value_neq_default
def _replace_secrets(
root: dict[Any, Any], secrets_map: dict[str, str]
) -> dict[Any, Any]:
result = root.copy()
for path, secret_id in secrets_map.items():
[*parts, last] = path.split(".")
current = result
for part in parts:
if part not in current:
break
current[part] = current[part].copy()
current = current[part]
if last in current:
current[last] = {
"lc": 1,
"type": "secret",
"id": [secret_id],
}
return result
def to_json_not_implemented(obj: object) -> SerializedNotImplemented:
"""Serialize a "not implemented" object.
Args:
obj: Object to serialize.
Returns:
`SerializedNotImplemented`
"""
id_: list[str] = []
try:
if hasattr(obj, "__name__"):
id_ = [*obj.__module__.split("."), obj.__name__]
elif hasattr(obj, "__class__"):
id_ = [*obj.__class__.__module__.split("."), obj.__class__.__name__]
except Exception:
logger.debug("Failed to serialize object", exc_info=True)
result: SerializedNotImplemented = {
"lc": 1,
"type": "not_implemented",
"id": id_,
"repr": None,
}
with contextlib.suppress(Exception):
result["repr"] = repr(obj)
return result
| Serializable |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/cachecontrol/filewrapper.py | {
"start": 281,
"end": 4292
} | class ____:
"""
Small wrapper around a fp object which will tee everything read into a
buffer, and when that file is closed it will execute a callback with the
contents of that buffer.
All attributes are proxied to the underlying file object.
This class uses members with a double underscore (__) leading prefix so as
not to accidentally shadow an attribute.
The data is stored in a temporary file until it is all available. As long
as the temporary files directory is disk-based (sometimes it's a
memory-backed-``tmpfs`` on Linux), data will be unloaded to disk if memory
pressure is high. For small files the disk usually won't be used at all,
it'll all be in the filesystem memory cache, so there should be no
performance impact.
"""
def __init__(
self, fp: HTTPResponse, callback: Callable[[bytes], None] | None
) -> None:
self.__buf = NamedTemporaryFile("rb+", delete=True)
self.__fp = fp
self.__callback = callback
def __getattr__(self, name: str) -> Any:
# The vaguaries of garbage collection means that self.__fp is
# not always set. By using __getattribute__ and the private
# name[0] allows looking up the attribute value and raising an
# AttributeError when it doesn't exist. This stop thigns from
# infinitely recursing calls to getattr in the case where
# self.__fp hasn't been set.
#
# [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers
fp = self.__getattribute__("_CallbackFileWrapper__fp")
return getattr(fp, name)
def __is_fp_closed(self) -> bool:
try:
return self.__fp.fp is None
except AttributeError:
pass
try:
closed: bool = self.__fp.closed
return closed
except AttributeError:
pass
# We just don't cache it then.
# TODO: Add some logging here...
return False
def _close(self) -> None:
if self.__callback:
if self.__buf.tell() == 0:
# Empty file:
result = b""
else:
# Return the data without actually loading it into memory,
# relying on Python's buffer API and mmap(). mmap() just gives
# a view directly into the filesystem's memory cache, so it
# doesn't result in duplicate memory use.
self.__buf.seek(0, 0)
result = memoryview(
mmap.mmap(self.__buf.fileno(), 0, access=mmap.ACCESS_READ)
)
self.__callback(result)
# We assign this to None here, because otherwise we can get into
# really tricky problems where the CPython interpreter dead locks
# because the callback is holding a reference to something which
# has a __del__ method. Setting this to None breaks the cycle
# and allows the garbage collector to do it's thing normally.
self.__callback = None
# Closing the temporary file releases memory and frees disk space.
# Important when caching big files.
self.__buf.close()
def read(self, amt: int | None = None) -> bytes:
data: bytes = self.__fp.read(amt)
if data:
# We may be dealing with b'', a sign that things are over:
# it's passed e.g. after we've already closed self.__buf.
self.__buf.write(data)
if self.__is_fp_closed():
self._close()
return data
def _safe_read(self, amt: int) -> bytes:
data: bytes = self.__fp._safe_read(amt) # type: ignore[attr-defined]
if amt == 2 and data == b"\r\n":
# urllib executes this read to toss the CRLF at the end
# of the chunk.
return data
self.__buf.write(data)
if self.__is_fp_closed():
self._close()
return data
| CallbackFileWrapper |
python | langchain-ai__langchain | libs/partners/ollama/tests/integration_tests/chat_models/test_chat_models_standard.py | {
"start": 300,
"end": 1956
} | class ____(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[ChatOllama]:
return ChatOllama
@property
def chat_model_params(self) -> dict:
return {"model": DEFAULT_MODEL_NAME}
@property
def supports_json_mode(self) -> bool:
return True
@property
def has_tool_choice(self) -> bool:
# TODO: update after Ollama implements
# https://github.com/ollama/ollama/blob/main/docs/openai.md#supported-request-fields
return False
@property
def supports_image_inputs(self) -> bool:
return True
@pytest.mark.xfail(
reason=(
"Will sometime encounter AssertionErrors where tool responses are "
"`'3'` instead of `3`"
)
)
def test_tool_calling(self, model: BaseChatModel) -> None:
super().test_tool_calling(model)
@pytest.mark.xfail(
reason=(
"Will sometime encounter AssertionErrors where tool responses are "
"`'3'` instead of `3`"
)
)
async def test_tool_calling_async(self, model: BaseChatModel) -> None:
await super().test_tool_calling_async(model)
@pytest.mark.xfail(
reason=(
"Will sometimes fail due to Ollama's inconsistent tool call argument "
"structure (see https://github.com/ollama/ollama/issues/6155). "
"Args may contain unexpected keys like 'conversations' instead of "
"empty dict."
)
)
def test_tool_calling_with_no_arguments(self, model: BaseChatModel) -> None:
super().test_tool_calling_with_no_arguments(model)
| TestChatOllama |
python | openai__openai-python | src/openai/types/fine_tuning/jobs/fine_tuning_job_checkpoint.py | {
"start": 240,
"end": 596
} | class ____(BaseModel):
full_valid_loss: Optional[float] = None
full_valid_mean_token_accuracy: Optional[float] = None
step: Optional[float] = None
train_loss: Optional[float] = None
train_mean_token_accuracy: Optional[float] = None
valid_loss: Optional[float] = None
valid_mean_token_accuracy: Optional[float] = None
| Metrics |
python | ansible__ansible | lib/ansible/cli/doc.py | {
"start": 17321,
"end": 73077
} | class ____(CLI, RoleMixin):
""" displays information on modules installed in Ansible libraries.
It displays a terse listing of plugins and their short descriptions,
provides a printout of their DOCUMENTATION strings,
and it can create a short "snippet" which can be pasted into a playbook. """
name = 'ansible-doc'
# default ignore list for detailed views
IGNORE = ('module', 'docuri', 'version_added', 'version_added_collection', 'short_description',
'now_date', 'plainexamples', 'returndocs', 'collection', 'plugin_name')
# Warning: If you add more elements here, you also need to add it to the docsite build (in the
# ansible-community/antsibull repo)
_ITALIC = re.compile(r"\bI\(([^)]+)\)")
_BOLD = re.compile(r"\bB\(([^)]+)\)")
_MODULE = re.compile(r"\bM\(([^)]+)\)")
_PLUGIN = re.compile(r"\bP\(([^#)]+)#([a-z]+)\)")
_LINK = re.compile(r"\bL\(([^)]+), *([^)]+)\)")
_URL = re.compile(r"\bU\(([^)]+)\)")
_REF = re.compile(r"\bR\(([^)]+), *([^)]+)\)")
_CONST = re.compile(r"\bC\(([^)]+)\)")
_SEM_PARAMETER_STRING = r"\(((?:[^\\)]+|\\.)+)\)"
_SEM_OPTION_NAME = re.compile(r"\bO" + _SEM_PARAMETER_STRING)
_SEM_OPTION_VALUE = re.compile(r"\bV" + _SEM_PARAMETER_STRING)
_SEM_ENV_VARIABLE = re.compile(r"\bE" + _SEM_PARAMETER_STRING)
_SEM_RET_VALUE = re.compile(r"\bRV" + _SEM_PARAMETER_STRING)
_RULER = re.compile(r"\bHORIZONTALLINE\b")
# helper for unescaping
_UNESCAPE = re.compile(r"\\(.)")
_FQCN_TYPE_PREFIX_RE = re.compile(r'^([^.]+\.[^.]+\.[^#]+)#([a-z]+):(.*)$')
_IGNORE_MARKER = 'ignore:'
# rst specific
_RST_NOTE = re.compile(r".. note::")
_RST_SEEALSO = re.compile(r".. seealso::")
_RST_ROLES = re.compile(r":\w+?:`")
_RST_DIRECTIVES = re.compile(r".. \w+?::")
def __init__(self, args):
super(DocCLI, self).__init__(args)
self.plugin_list = set()
@staticmethod
def _tty_ify_sem_simle(matcher):
text = DocCLI._UNESCAPE.sub(r'\1', matcher.group(1))
return f"`{text}'"
@staticmethod
def _tty_ify_sem_complex(matcher):
text = DocCLI._UNESCAPE.sub(r'\1', matcher.group(1))
value = None
if '=' in text:
text, value = text.split('=', 1)
m = DocCLI._FQCN_TYPE_PREFIX_RE.match(text)
if m:
plugin_fqcn = m.group(1)
plugin_type = m.group(2)
text = m.group(3)
elif text.startswith(DocCLI._IGNORE_MARKER):
text = text[len(DocCLI._IGNORE_MARKER):]
plugin_fqcn = plugin_type = ''
else:
plugin_fqcn = plugin_type = ''
entrypoint = None
if ':' in text:
entrypoint, text = text.split(':', 1)
if value is not None:
text = f"{text}={value}"
if plugin_fqcn and plugin_type:
plugin_suffix = '' if plugin_type in ('role', 'module', 'playbook') else ' plugin'
plugin = f"{plugin_type}{plugin_suffix} {plugin_fqcn}"
if plugin_type == 'role' and entrypoint is not None:
plugin = f"{plugin}, {entrypoint} entrypoint"
return f"`{text}' (of {plugin})"
return f"`{text}'"
@classmethod
def tty_ify(cls, text):
# general formatting
t = cls._ITALIC.sub(_format(r"\1", 'UNDERLINE'), text) # no ascii code for this
t = cls._BOLD.sub(_format(r"\1", 'BOLD'), t)
t = cls._MODULE.sub(_format(r"\1", 'MODULE'), t) # M(word) => [word]
t = cls._URL.sub(r"\1", t) # U(word) => word
t = cls._LINK.sub(r"\1 <\2>", t) # L(word, url) => word <url>
t = cls._PLUGIN.sub(_format("[" + r"\1" + "]", 'PLUGIN'), t) # P(word#type) => [word]
t = cls._REF.sub(_format(r"\1", 'REF'), t) # R(word, sphinx-ref) => word
t = cls._CONST.sub(_format(r"`\1'", 'CONSTANT'), t)
t = cls._SEM_OPTION_NAME.sub(cls._tty_ify_sem_complex, t) # O(expr)
t = cls._SEM_OPTION_VALUE.sub(cls._tty_ify_sem_simle, t) # V(expr)
t = cls._SEM_ENV_VARIABLE.sub(cls._tty_ify_sem_simle, t) # E(expr)
t = cls._SEM_RET_VALUE.sub(cls._tty_ify_sem_complex, t) # RV(expr)
t = cls._RULER.sub("\n{0}\n".format("-" * 13), t) # HORIZONTALLINE => -------
# remove rst
t = cls._RST_SEEALSO.sub(r"See also:", t) # seealso to See also:
t = cls._RST_NOTE.sub(_format(r"Note:", 'bold'), t) # .. note:: to note:
t = cls._RST_ROLES.sub(r"`", t) # remove :ref: and other tags, keep tilde to match ending one
t = cls._RST_DIRECTIVES.sub(r"", t) # remove .. stuff:: in general
# handle docsite refs
# U(word) => word
t = re.sub(cls._URL, lambda m: _format(r"%s" % _doclink(m.group(1)), 'LINK'), t)
# L(word, url) => word <url>
t = re.sub(cls._LINK, lambda m: r"%s <%s>" % (m.group(1), _format(_doclink(m.group(2)), 'LINK')), t)
return t
def init_parser(self):
coll_filter = 'A supplied argument will be used for filtering, can be a namespace or full collection name.'
super(DocCLI, self).init_parser(
desc="plugin documentation tool",
epilog="See man pages for Ansible CLI options or website for tutorials https://docs.ansible.com"
)
opt_help.add_module_options(self.parser)
opt_help.add_basedir_options(self.parser)
# targets
self.parser.add_argument('args', nargs='*', help='Plugin', metavar='plugin')
self.parser.add_argument("-t", "--type", action="store", default='module', dest='type',
help='Choose which plugin type (defaults to "module"). '
'Available plugin types are : {0}'.format(TARGET_OPTIONS),
choices=TARGET_OPTIONS)
# formatting
self.parser.add_argument("-j", "--json", action="store_true", default=False, dest='json_format',
help='Change output into json format.')
# TODO: warn if not used with -t roles
# role-specific options
self.parser.add_argument("-r", "--roles-path", dest='roles_path', default=C.DEFAULT_ROLES_PATH,
type=opt_help.unfrack_path(pathsep=True),
action=opt_help.PrependListAction,
help='The path to the directory containing your roles.')
# exclusive modifiers
exclusive = self.parser.add_mutually_exclusive_group()
# TODO: warn if not used with -t roles
exclusive.add_argument("-e", "--entry-point", dest="entry_point",
help="Select the entry point for role(s).")
# TODO: warn with --json as it is incompatible
exclusive.add_argument("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
help='Show playbook snippet for these plugin types: %s' % ', '.join(SNIPPETS))
# TODO: warn when arg/plugin is passed
exclusive.add_argument("-F", "--list_files", action="store_true", default=False, dest="list_files",
help='Show plugin names and their source files without summaries (implies --list). %s' % coll_filter)
exclusive.add_argument("-l", "--list", action="store_true", default=False, dest='list_dir',
help='List available plugins. %s' % coll_filter)
exclusive.add_argument("--metadata-dump", action="store_true", default=False, dest='dump',
help='**For internal use only** Dump json metadata for all entries, ignores other options.')
# generic again
self.parser.add_argument("--no-fail-on-errors", action="store_true", default=False, dest='no_fail_on_errors',
help='**For internal use only** Only used for --metadata-dump. '
'Do not fail on errors. Report the error message in the JSON instead.')
def post_process_args(self, options):
options = super(DocCLI, self).post_process_args(options)
display.verbosity = options.verbosity
return options
def display_plugin_list(self, results):
# format for user
displace = max(len(x) for x in results.keys())
linelimit = display.columns - displace - 5
text = []
deprecated = []
# format display per option
if context.CLIARGS['list_files']:
# list plugin file names
for plugin in sorted(results.keys()):
filename = to_native(results[plugin])
# handle deprecated for builtin/legacy
pbreak = plugin.split('.')
if pbreak[-1].startswith('_') and pbreak[0] == 'ansible' and pbreak[1] in ('builtin', 'legacy'):
pbreak[-1] = pbreak[-1][1:]
plugin = '.'.join(pbreak)
deprecated.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(filename), filename))
else:
text.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(filename), filename))
else:
# list plugin names and short desc
for plugin in sorted(results.keys()):
desc = DocCLI.tty_ify(results[plugin])
if len(desc) > linelimit:
desc = desc[:linelimit] + '...'
pbreak = plugin.split('.')
# TODO: add mark for deprecated collection plugins
if pbreak[-1].startswith('_') and plugin.startswith(('ansible.builtin.', 'ansible.legacy.')):
# Handle deprecated ansible.builtin plugins
pbreak[-1] = pbreak[-1][1:]
plugin = '.'.join(pbreak)
deprecated.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(desc), desc))
else:
text.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(desc), desc))
if len(deprecated) > 0:
text.append("\nDEPRECATED:")
text.extend(deprecated)
# display results
DocCLI.pager("\n".join(text))
def _display_available_roles(self, list_json):
"""Display all roles we can find with a valid argument specification.
Output is: fqcn role name, entry point, short description
"""
roles = list(list_json.keys())
entry_point_names = set() # to find max len
for role in roles:
for entry_point in list_json[role]['entry_points'].keys():
entry_point_names.add(entry_point)
max_role_len = 0
max_ep_len = 0
if entry_point_names:
max_ep_len = max(len(x) for x in entry_point_names)
linelimit = display.columns - max_role_len - max_ep_len - 5
text = []
for role in sorted(roles):
if list_json[role]['entry_points']:
text.append('%s:' % role)
text.append(' specs:')
for entry_point, desc in list_json[role]['entry_points'].items():
if len(desc) > linelimit:
desc = desc[:linelimit] + '...'
text.append(" %-*s: %s" % (max_ep_len, entry_point, desc))
else:
text.append('%s' % role)
# display results
DocCLI.pager("\n".join(text))
def _display_role_doc(self, role_json):
roles = list(role_json.keys())
text = []
for role in roles:
try:
if 'error' in role_json[role]:
display.warning("Skipping role '%s' due to: %s" % (role, role_json[role]['error']), True)
continue
text += self.get_role_man_text(role, role_json[role])
except AnsibleError as ex:
# TODO: warn and skip role?
raise AnsibleParserError(f"Error extracting role docs from {role!r}.") from ex
# display results
DocCLI.pager("\n".join(text))
@staticmethod
def _list_keywords():
return yaml.load(pkgutil.get_data('ansible', 'keyword_desc.yml'), Loader=AnsibleInstrumentedLoader)
@staticmethod
def _get_keywords_docs(keys):
data = {}
descs = DocCLI._list_keywords()
for key in keys:
if key.startswith('with_'):
# simplify loops, dont want to handle every with_<lookup> combo
keyword = 'loop'
elif key == 'async':
# cause async became reserved in python we had to rename internally
keyword = 'async_val'
else:
keyword = key
try:
# if no desc, typeerror raised ends this block
kdata = {'description': descs[key]}
# get playbook objects for keyword and use first to get keyword attributes
kdata['applies_to'] = []
for pobj in PB_OBJECTS:
if pobj not in PB_LOADED:
obj_class = 'ansible.playbook.%s' % pobj.lower()
loaded_class = importlib.import_module(obj_class)
PB_LOADED[pobj] = getattr(loaded_class, pobj, None)
if keyword in PB_LOADED[pobj].fattributes:
kdata['applies_to'].append(pobj)
# we should only need these once
if 'type' not in kdata:
fa = PB_LOADED[pobj].fattributes.get(keyword)
if getattr(fa, 'private'):
kdata = {}
raise KeyError
kdata['type'] = getattr(fa, 'isa', 'string')
if keyword.endswith('when') or keyword in ('until',):
# TODO: make this a field attribute property,
# would also helps with the warnings on {{}} stacking
kdata['template'] = 'implicit'
elif getattr(fa, 'static'):
kdata['template'] = 'static'
else:
kdata['template'] = 'explicit'
# those that require no processing
for visible in ('alias', 'priority'):
kdata[visible] = getattr(fa, visible)
# remove None keys
for k in list(kdata.keys()):
if kdata[k] is None:
del kdata[k]
data[key] = kdata
except (AttributeError, KeyError) as ex:
display.error_as_warning(f'Skipping invalid keyword {key!r}.', ex)
return data
def _get_collection_filter(self):
coll_filter = None
if len(context.CLIARGS['args']) >= 1:
coll_filter = context.CLIARGS['args']
for coll_name in coll_filter:
if not AnsibleCollectionRef.is_valid_collection_name(coll_name):
raise AnsibleError('Invalid collection name (must be of the form namespace.collection): {0}'.format(coll_name))
return coll_filter
def _list_plugins(self, plugin_type, content):
DocCLI._prep_loader(plugin_type)
coll_filter = self._get_collection_filter()
plugins = _list_plugins_with_info(plugin_type, coll_filter)
# Remove the internal ansible._protomatter plugins if getting all plugins
if not coll_filter:
plugins = {k: v for k, v in plugins.items() if not k.startswith('ansible._protomatter.')}
# get appropriate content depending on option
if content == 'dir':
results = self._get_plugin_list_descriptions(plugins)
elif content == 'files':
results = {k: v.path for k, v in plugins.items()}
else:
results = {k: {} for k in plugins.keys()}
self.plugin_list = set() # reset for next iteration
return results
def _get_plugins_docs(self, plugin_type: str, names: collections.abc.Iterable[str], fail_ok: bool = False, fail_on_errors: bool = True) -> dict[str, dict]:
loader = DocCLI._prep_loader(plugin_type)
if plugin_type in ('filter', 'test'):
jinja2_builtins = _jinja_plugins.get_jinja_builtin_plugin_descriptions(plugin_type)
jinja2_builtins.update({name.split('.')[-1]: value for name, value in jinja2_builtins.items()}) # add short-named versions for lookup
else:
jinja2_builtins = {}
# get the docs for plugins in the command line list
plugin_docs = {}
for plugin in names:
doc: dict[str, t.Any] = {}
try:
doc, plainexamples, returndocs, metadata = self._get_plugin_docs_with_jinja2_builtins(
plugin,
plugin_type,
loader,
fragment_loader,
jinja2_builtins,
)
except AnsiblePluginNotFound as e:
display.warning(to_native(e))
continue
except Exception as ex:
msg = "Missing documentation (or could not parse documentation)"
if not fail_on_errors:
plugin_docs[plugin] = {'error': f'{msg}: {ex}.'}
continue
msg = f"{plugin_type} {plugin} {msg}"
if fail_ok:
display.warning(f'{msg}: {ex}')
else:
raise AnsibleError(f'{msg}.') from ex
if not doc:
# The doc section existed but was empty
if not fail_on_errors:
plugin_docs[plugin] = {'error': 'No valid documentation found'}
continue
docs = DocCLI._combine_plugin_doc(plugin, plugin_type, doc, plainexamples, returndocs, metadata)
if not fail_on_errors:
# Check whether JSON serialization would break
try:
_json.json_dumps_formatted(docs)
except Exception as ex: # pylint:disable=broad-except
plugin_docs[plugin] = {'error': f'Cannot serialize documentation as JSON: {ex}'}
continue
plugin_docs[plugin] = docs
return plugin_docs
def _get_plugin_docs_with_jinja2_builtins(
self,
plugin_name: str,
plugin_type: str,
loader: t.Any,
fragment_loader: t.Any,
jinja_builtins: dict[str, str],
) -> tuple[dict, str | None, dict | None, dict | None]:
try:
return get_plugin_docs(plugin_name, plugin_type, loader, fragment_loader, (context.CLIARGS['verbosity'] > 0))
except Exception:
if (desc := jinja_builtins.get(plugin_name, ...)) is not ...:
short_name = plugin_name.split('.')[-1]
long_name = f'ansible.builtin.{short_name}'
# Dynamically build a doc stub for any Jinja2 builtin plugin we haven't
# explicitly documented.
doc = dict(
collection='ansible.builtin',
plugin_name=long_name,
filename='',
short_description=desc,
description=[
desc,
'',
f"This is the Jinja builtin {plugin_type} plugin {short_name!r}.",
f"See: U(https://jinja.palletsprojects.com/en/stable/templates/#jinja-{plugin_type}s.{short_name})",
],
)
return doc, None, None, None
raise
def _get_roles_path(self):
"""
Add any 'roles' subdir in playbook dir to the roles search path.
And as a last resort, add the playbook dir itself. Order being:
- 'roles' subdir of playbook dir
- DEFAULT_ROLES_PATH (default in cliargs)
- playbook dir (basedir)
NOTE: This matches logic in RoleDefinition._load_role_path() method.
"""
roles_path = context.CLIARGS['roles_path']
if context.CLIARGS['basedir'] is not None:
subdir = os.path.join(context.CLIARGS['basedir'], "roles")
if os.path.isdir(subdir):
roles_path = (subdir,) + roles_path
roles_path = roles_path + (context.CLIARGS['basedir'],)
return roles_path
@staticmethod
def _prep_loader(plugin_type):
""" return a plugint type specific loader """
loader = getattr(plugin_loader, '%s_loader' % plugin_type)
# add to plugin paths from command line
if context.CLIARGS['basedir'] is not None:
loader.add_directory(context.CLIARGS['basedir'], with_subdir=True)
if context.CLIARGS['module_path']:
for path in context.CLIARGS['module_path']:
if path:
loader.add_directory(path)
# save only top level paths for errors
loader._paths = None # reset so we can use subdirs later
return loader
def run(self):
super(DocCLI, self).run()
basedir = context.CLIARGS['basedir']
plugin_type = context.CLIARGS['type'].lower()
do_json = context.CLIARGS['json_format'] or context.CLIARGS['dump']
listing = context.CLIARGS['list_files'] or context.CLIARGS['list_dir']
no_fail = bool(not context.CLIARGS['no_fail_on_errors'])
if context.CLIARGS['list_files']:
content = 'files'
elif context.CLIARGS['list_dir']:
content = 'dir'
else:
content = None
docs = {}
if basedir:
AnsibleCollectionConfig.playbook_paths = basedir
if plugin_type not in TARGET_OPTIONS:
raise AnsibleOptionsError("Unknown or undocumentable plugin type: %s" % plugin_type)
if context.CLIARGS['dump']:
# we always dump all types, ignore restrictions
ptypes = TARGET_OPTIONS
docs['all'] = {}
for ptype in ptypes:
if ptype == 'role':
roles = self._create_role_list(fail_on_errors=no_fail)
docs['all'][ptype] = self._create_role_doc(roles.keys(), context.CLIARGS['entry_point'], fail_on_errors=no_fail)
elif ptype == 'keyword':
names = DocCLI._list_keywords()
docs['all'][ptype] = DocCLI._get_keywords_docs(names.keys())
else:
plugin_names = self._list_plugins(ptype, None)
docs['all'][ptype] = self._get_plugins_docs(ptype, plugin_names, fail_ok=(ptype in ('test', 'filter')), fail_on_errors=no_fail)
# reset list after each type to avoid pollution
elif listing:
if plugin_type == 'keyword':
docs = DocCLI._list_keywords()
elif plugin_type == 'role':
docs = self._create_role_list(fail_on_errors=False)
else:
docs = self._list_plugins(plugin_type, content)
else:
# here we require a name
if len(context.CLIARGS['args']) == 0:
raise AnsibleOptionsError("Missing name(s), incorrect options passed for detailed documentation.")
if plugin_type == 'keyword':
docs = DocCLI._get_keywords_docs(context.CLIARGS['args'])
elif plugin_type == 'role':
docs = self._create_role_doc(context.CLIARGS['args'], context.CLIARGS['entry_point'], fail_on_errors=no_fail)
else:
# display specific plugin docs
docs = self._get_plugins_docs(plugin_type, context.CLIARGS['args'])
# Display the docs
if do_json:
jdump(docs)
else:
text = []
if plugin_type in C.DOCUMENTABLE_PLUGINS:
if listing and docs:
self.display_plugin_list(docs)
elif context.CLIARGS['show_snippet']:
if plugin_type not in SNIPPETS:
raise AnsibleError('Snippets are only available for the following plugin'
' types: %s' % ', '.join(SNIPPETS))
for plugin, doc_data in docs.items():
try:
textret = DocCLI.format_snippet(plugin, plugin_type, doc_data['doc'])
except ValueError as e:
display.warning("Unable to construct a snippet for"
" '{0}': {1}".format(plugin, to_text(e)))
else:
text.append(textret)
else:
# Some changes to how plain text docs are formatted
for plugin, doc_data in docs.items():
textret = DocCLI.format_plugin_doc(plugin, plugin_type,
doc_data['doc'], doc_data['examples'],
doc_data['return'], doc_data['metadata'])
if textret:
text.append(textret)
else:
display.warning("No valid documentation was retrieved from '%s'" % plugin)
elif plugin_type == 'role':
if context.CLIARGS['list_dir'] and docs:
self._display_available_roles(docs)
elif docs:
self._display_role_doc(docs)
elif docs:
text = DocCLI.tty_ify(DocCLI._dump_yaml(docs))
if text:
DocCLI.pager(''.join(text))
return 0
@staticmethod
def get_all_plugins_of_type(plugin_type):
loader = getattr(plugin_loader, '%s_loader' % plugin_type)
paths = loader._get_paths_with_context()
plugins = []
for path_context in paths:
plugins += _list_plugins_with_info(plugin_type).keys()
return sorted(plugins)
@staticmethod
def get_plugin_metadata(plugin_type, plugin_name):
# if the plugin lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
loader = getattr(plugin_loader, '%s_loader' % plugin_type)
result = loader.find_plugin_with_context(plugin_name, mod_type='.py', ignore_deprecated=True, check_aliases=True)
if not result.resolved:
raise AnsibleError("unable to load {0} plugin named {1} ".format(plugin_type, plugin_name))
filename = result.plugin_resolved_path
collection_name = result.plugin_resolved_collection
try:
doc, __, __, __ = get_docstring(filename, fragment_loader, verbose=(context.CLIARGS['verbosity'] > 0),
collection_name=collection_name, plugin_type=plugin_type)
except Exception as ex:
raise AnsibleError(f"{plugin_type} {plugin_name} at {filename!r} has a documentation formatting error or is missing documentation.") from ex
if doc is None:
# Removed plugins don't have any documentation
return None
return dict(
name=plugin_name,
namespace=DocCLI.namespace_from_plugin_filepath(filename, plugin_name, loader.package_path),
description=doc.get('short_description', "UNKNOWN"),
version_added=doc.get('version_added', "UNKNOWN")
)
@staticmethod
def namespace_from_plugin_filepath(filepath, plugin_name, basedir):
if not basedir.endswith('/'):
basedir += '/'
rel_path = filepath.replace(basedir, '')
extension_free = os.path.splitext(rel_path)[0]
namespace_only = extension_free.rsplit(plugin_name, 1)[0].strip('/_')
clean_ns = namespace_only.replace('/', '.')
if clean_ns == '':
clean_ns = None
return clean_ns
@staticmethod
def _combine_plugin_doc(plugin, plugin_type, doc, plainexamples, returndocs, metadata):
# generate extra data
if plugin_type == 'module':
# is there corresponding action plugin?
if plugin in action_loader:
doc['has_action'] = True
else:
doc['has_action'] = False
# return everything as one dictionary
return {'doc': doc, 'examples': plainexamples, 'return': returndocs, 'metadata': metadata}
@staticmethod
def format_snippet(plugin, plugin_type, doc):
""" return heavily commented plugin use to insert into play """
if plugin_type == 'inventory' and doc.get('options', {}).get('plugin'):
# these do not take a yaml config that we can write a snippet for
raise ValueError('The {0} inventory plugin does not take YAML type config source'
' that can be used with the "auto" plugin so a snippet cannot be'
' created.'.format(plugin))
text = []
if plugin_type == 'lookup':
text = _do_lookup_snippet(doc)
elif 'options' in doc:
text = _do_yaml_snippet(doc)
text.append('')
return "\n".join(text)
@staticmethod
def format_plugin_doc(plugin, plugin_type, doc, plainexamples, returndocs, metadata):
collection_name = doc['collection']
# TODO: do we really want this?
# add_collection_to_versions_and_dates(doc, '(unknown)', is_module=(plugin_type == 'module'))
# remove_current_collection_from_versions_and_dates(doc, collection_name, is_module=(plugin_type == 'module'))
# remove_current_collection_from_versions_and_dates(
# returndocs, collection_name, is_module=(plugin_type == 'module'), return_docs=True)
# assign from other sections
doc['plainexamples'] = plainexamples
doc['returndocs'] = returndocs
doc['metadata'] = metadata
try:
text = DocCLI.get_man_text(doc, collection_name, plugin_type)
except Exception as ex:
raise AnsibleError(f"Unable to retrieve documentation from {plugin!r}.") from ex
return text
def _get_plugin_list_descriptions(self, plugins: dict[str, _PluginDocMetadata]) -> dict[str, str]:
descs = {}
for plugin, plugin_info in plugins.items():
# TODO: move to plugin itself i.e: plugin.get_desc()
doc = None
docerror = None
if plugin_info.path:
filename = Path(to_native(plugin_info.path))
try:
doc = read_docstub(filename)
except Exception as e:
docerror = e
# plugin file was empty or had error, lets try other options
if doc is None:
# handle test/filters that are in file with diff name
base = plugin.split('.')[-1]
basefile = filename.with_name(base + filename.suffix)
for extension in C.DOC_EXTENSIONS:
docfile = basefile.with_suffix(extension)
try:
if docfile.exists():
doc = read_docstub(docfile)
except Exception as e:
docerror = e
# Do a final fallback to see if the plugin is a shadowed Jinja2 plugin
# without any explicit documentation.
if doc is None and plugin_info.jinja_builtin_short_description:
descs[plugin] = plugin_info.jinja_builtin_short_description
continue
if docerror:
display.error_as_warning(f"{plugin} has a documentation formatting error.", exception=docerror)
continue
if not doc or not isinstance(doc, dict):
desc = 'UNDOCUMENTED'
else:
desc = doc.get('short_description', 'INVALID SHORT DESCRIPTION').strip()
descs[plugin] = desc
return descs
@staticmethod
def print_paths(finder):
""" Returns a string suitable for printing of the search path """
# Uses a list to get the order right
ret = []
for i in finder._get_paths(subdirs=False):
i = to_text(i, errors='surrogate_or_strict')
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
@staticmethod
def _dump_yaml(struct, flow_style=False):
return yaml_dump(struct, default_flow_style=flow_style, default_style="''", Dumper=AnsibleDumper).rstrip('\n')
@staticmethod
def _indent_lines(text, indent):
return DocCLI.tty_ify('\n'.join([indent + line for line in text.split('\n')]))
@staticmethod
def _format_version_added(version_added, version_added_collection=None):
if version_added_collection == 'ansible.builtin':
version_added_collection = 'ansible-core'
# In ansible-core, version_added can be 'historical'
if version_added == 'historical':
return 'historical'
if version_added_collection:
version_added = '%s of %s' % (version_added, version_added_collection)
return 'version %s' % (version_added, )
@staticmethod
def warp_fill(text, limit, initial_indent='', subsequent_indent='', initial_extra=0, **kwargs):
result = []
for paragraph in text.split('\n\n'):
wrapped = textwrap.fill(paragraph, limit, initial_indent=initial_indent + ' ' * initial_extra, subsequent_indent=subsequent_indent,
break_on_hyphens=False, break_long_words=False, drop_whitespace=True, **kwargs)
if initial_extra and wrapped.startswith(' ' * initial_extra):
wrapped = wrapped[initial_extra:]
result.append(wrapped)
initial_indent = subsequent_indent
initial_extra = 0
return '\n'.join(result)
@staticmethod
def add_fields(text, fields, limit, opt_indent, return_values=False, base_indent='', man=False):
for o in sorted(fields):
# Create a copy so we don't modify the original (in case YAML anchors have been used)
opt = dict(fields[o])
# required is used as indicator and removed
required = opt.pop('required', False)
if not isinstance(required, bool):
raise AnsibleError("Incorrect value for 'Required', a boolean is needed.: %s" % required)
opt_leadin = ' '
key = ''
if required:
if C.ANSIBLE_NOCOLOR:
opt_leadin = "="
key = "%s%s %s" % (base_indent, opt_leadin, _format(o, 'bold', 'red'))
else:
if C.ANSIBLE_NOCOLOR:
opt_leadin = "-"
key = "%s%s %s" % (base_indent, opt_leadin, _format(o, 'yellow'))
# description is specifically formatted and can either be string or list of strings
if 'description' not in opt:
raise AnsibleError("All (sub-)options and return values must have a 'description' field", obj=o)
text.append('')
# TODO: push this to top of for and sort by size, create indent on largest key?
inline_indent = ' ' * max((len(opt_indent) - len(o)) - len(base_indent), 2)
extra_indent = base_indent + ' ' * (len(o) + 3)
sub_indent = inline_indent + extra_indent
if is_sequence(opt['description']):
for entry_idx, entry in enumerate(opt['description'], 1):
if not isinstance(entry, str):
raise AnsibleError("Expected string in description of %s at index %s, got %s" % (o, entry_idx, type(entry)))
if entry_idx == 1:
text.append(key + DocCLI.warp_fill(DocCLI.tty_ify(entry), limit,
initial_indent=inline_indent, subsequent_indent=sub_indent, initial_extra=len(extra_indent)))
else:
text.append(DocCLI.warp_fill(DocCLI.tty_ify(entry), limit, initial_indent=sub_indent, subsequent_indent=sub_indent))
else:
if not isinstance(opt['description'], str):
raise AnsibleError("Expected string in description of %s, got %s" % (o, type(opt['description'])))
text.append(key + DocCLI.warp_fill(DocCLI.tty_ify(opt['description']), limit,
initial_indent=inline_indent, subsequent_indent=sub_indent, initial_extra=len(extra_indent)))
del opt['description']
suboptions = []
for subkey in ('options', 'suboptions', 'contains', 'spec'):
if subkey in opt:
suboptions.append((subkey, opt.pop(subkey)))
if not required and not return_values and 'default' not in opt:
opt['default'] = None
# sanitize config items
conf = {}
for config in ('env', 'ini', 'yaml', 'vars', 'keyword'):
if config in opt and opt[config]:
# Create a copy so we don't modify the original (in case YAML anchors have been used)
conf[config] = [dict(item) for item in opt.pop(config)]
for ignore in DocCLI.IGNORE:
for item in conf[config]:
if display.verbosity > 0 and 'version_added' in item:
item['added_in'] = DocCLI._format_version_added(item['version_added'], item.get('version_added_colleciton', 'ansible-core'))
if ignore in item:
del item[ignore]
# reformat cli options
if 'cli' in opt and opt['cli']:
conf['cli'] = []
for cli in opt['cli']:
if 'option' not in cli:
conf['cli'].append({'name': cli['name'], 'option': '--%s' % cli['name'].replace('_', '-')})
else:
conf['cli'].append(cli)
del opt['cli']
# add custom header for conf
if conf:
text.append(DocCLI._indent_lines(DocCLI._dump_yaml({'set_via': conf}), opt_indent))
# these we handle at the end of generic option processing
version_added = opt.pop('version_added', None)
version_added_collection = opt.pop('version_added_collection', None)
# general processing for options
for k in sorted(opt):
if k.startswith('_'):
continue
if is_sequence(opt[k]):
text.append(DocCLI._indent_lines('%s: %s' % (k, DocCLI._dump_yaml(opt[k], flow_style=True)), opt_indent))
else:
text.append(DocCLI._indent_lines(DocCLI._dump_yaml({k: opt[k]}), opt_indent))
if version_added and not man:
text.append("%sadded in: %s" % (opt_indent, DocCLI._format_version_added(version_added, version_added_collection)))
for subkey, subdata in suboptions:
text.append("%s%s:" % (opt_indent, subkey))
DocCLI.add_fields(text, subdata, limit, opt_indent + ' ', return_values, opt_indent)
@staticmethod
def _add_seealso(text: list[str], seealsos: list[dict[str, t.Any]], limit: int, opt_indent: str) -> None:
for item in seealsos:
if 'module' in item:
text.append(DocCLI.warp_fill(DocCLI.tty_ify('Module %s' % item['module']),
limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
description = item.get('description')
if description is None and item['module'].startswith('ansible.builtin.'):
description = 'The official documentation on the %s module.' % item['module']
if description is not None:
text.append(DocCLI.warp_fill(DocCLI.tty_ify(description),
limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
if item['module'].startswith('ansible.builtin.'):
relative_url = 'collections/%s_module.html' % item['module'].replace('.', '/', 2)
text.append(DocCLI.warp_fill(DocCLI.tty_ify(get_versioned_doclink(relative_url)),
limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent))
elif 'plugin' in item and 'plugin_type' in item:
plugin_suffix = ' plugin' if item['plugin_type'] not in ('module', 'role') else ''
text.append(DocCLI.warp_fill(DocCLI.tty_ify('%s%s %s' % (item['plugin_type'].title(), plugin_suffix, item['plugin'])),
limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
description = item.get('description')
if description is None and item['plugin'].startswith('ansible.builtin.'):
description = 'The official documentation on the %s %s%s.' % (item['plugin'], item['plugin_type'], plugin_suffix)
if description is not None:
text.append(DocCLI.warp_fill(DocCLI.tty_ify(description),
limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
if item['plugin'].startswith('ansible.builtin.'):
relative_url = 'collections/%s_%s.html' % (item['plugin'].replace('.', '/', 2), item['plugin_type'])
text.append(DocCLI.warp_fill(DocCLI.tty_ify(get_versioned_doclink(relative_url)),
limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent))
elif 'name' in item and 'link' in item and 'description' in item:
text.append(DocCLI.warp_fill(DocCLI.tty_ify(item['name']),
limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
text.append(DocCLI.warp_fill(DocCLI.tty_ify(item['description']),
limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
text.append(DocCLI.warp_fill(DocCLI.tty_ify(item['link']),
limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
elif 'ref' in item and 'description' in item:
text.append(DocCLI.warp_fill(DocCLI.tty_ify('Ansible documentation [%s]' % item['ref']),
limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
text.append(DocCLI.warp_fill(DocCLI.tty_ify(item['description']),
limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
text.append(DocCLI.warp_fill(DocCLI.tty_ify(get_versioned_doclink('/#stq=%s&stp=1' % item['ref'])),
limit - 6, initial_indent=opt_indent + ' ', subsequent_indent=opt_indent + ' '))
def get_role_man_text(self, role, role_json):
"""Generate text for the supplied role suitable for display.
This is similar to get_man_text(), but roles are different enough that we have
a separate method for formatting their display.
:param role: The role name.
:param role_json: The JSON for the given role as returned from _create_role_doc().
:returns: A array of text suitable for displaying to screen.
"""
text = []
opt_indent = " "
pad = display.columns * 0.20
limit = max(display.columns - int(pad), 70)
text.append("> ROLE: %s (%s)" % (_format(role, 'BOLD'), role_json.get('path')))
for entry_point in role_json['entry_points']:
doc = role_json['entry_points'][entry_point]
desc = ''
if doc.get('short_description'):
desc = "- %s" % (doc.get('short_description'))
text.append('')
text.append("ENTRY POINT: %s %s" % (_format(entry_point, "BOLD"), desc))
text.append('')
if version_added := doc.pop('version_added', None):
text.append(_format("ADDED IN:", 'bold') + " %s\n" % DocCLI._format_version_added(version_added))
if doc.get('description'):
if isinstance(doc['description'], list):
descs = doc['description']
else:
descs = [doc['description']]
for desc in descs:
text.append("%s" % DocCLI.warp_fill(DocCLI.tty_ify(desc), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
text.append('')
if doc.get('options'):
text.append(_format("Options", 'bold') + " (%s indicates it is required):" % ("=" if C.ANSIBLE_NOCOLOR else 'red'))
DocCLI.add_fields(text, doc.pop('options'), limit, opt_indent)
if notes := doc.pop('notes', False):
text.append("")
text.append(_format("NOTES:", 'bold'))
for note in notes:
text.append(DocCLI.warp_fill(DocCLI.tty_ify(note), limit - 6,
initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
if seealso := doc.pop('seealso', False):
text.append("")
text.append(_format("SEE ALSO:", 'bold'))
DocCLI._add_seealso(text, seealso, limit=limit, opt_indent=opt_indent)
# generic elements we will handle identically
for k in ('author',):
if k not in doc:
continue
text.append('')
if isinstance(doc[k], str):
text.append('%s: %s' % (k.upper(), DocCLI.warp_fill(DocCLI.tty_ify(doc[k]),
limit - (len(k) + 2), subsequent_indent=opt_indent)))
elif isinstance(doc[k], (list, tuple)):
text.append('%s: %s' % (k.upper(), ', '.join(doc[k])))
else:
# use empty indent since this affects the start of the yaml doc, not it's keys
text.append(DocCLI._indent_lines(DocCLI._dump_yaml({k.upper(): doc[k]}), ''))
if doc.get('examples', False):
text.append('')
text.append(_format("EXAMPLES:", 'bold'))
if isinstance(doc['examples'], str):
text.append(doc.pop('examples').strip())
else:
try:
text.append(yaml_dump(doc.pop('examples'), indent=2, default_flow_style=False))
except Exception as e:
raise AnsibleParserError("Unable to parse examples section.") from e
return text
@staticmethod
def get_man_text(doc, collection_name='', plugin_type=''):
# Create a copy so we don't modify the original
doc = dict(doc)
DocCLI.IGNORE = DocCLI.IGNORE + (context.CLIARGS['type'],)
opt_indent = " "
base_indent = " "
text = []
pad = display.columns * 0.20
limit = max(display.columns - int(pad), 70)
text.append("> %s %s (%s)" % (plugin_type.upper(), _format(doc.pop('plugin_name'), 'bold'), doc.pop('filename') or 'Jinja2'))
if isinstance(doc['description'], list):
descs = doc.pop('description')
else:
descs = [doc.pop('description')]
text.append('')
for desc in descs:
text.append(DocCLI.warp_fill(DocCLI.tty_ify(desc), limit, initial_indent=base_indent, subsequent_indent=base_indent))
if display.verbosity > 0:
doc['added_in'] = DocCLI._format_version_added(doc.pop('version_added', 'historical'), doc.pop('version_added_collection', 'ansible-core'))
if doc.get('deprecated', False):
text.append(_format("DEPRECATED: ", 'bold', 'DEP'))
if isinstance(doc['deprecated'], dict):
if 'removed_at_date' not in doc['deprecated'] and 'version' in doc['deprecated'] and 'removed_in' not in doc['deprecated']:
doc['deprecated']['removed_in'] = doc['deprecated']['version']
try:
text.append('\t' + C.config.get_deprecated_msg_from_config(doc['deprecated'], True, collection_name=collection_name))
except KeyError as e:
raise AnsibleError("Invalid deprecation documentation structure.") from e
else:
text.append("%s" % doc['deprecated'])
del doc['deprecated']
if doc.pop('has_action', False):
text.append("")
text.append(_format(" * note:", 'bold') + " This module has a corresponding action plugin.")
if doc.get('options', False):
text.append("")
text.append(_format("OPTIONS", 'bold') + " (%s indicates it is required):" % ("=" if C.ANSIBLE_NOCOLOR else 'red'))
DocCLI.add_fields(text, doc.pop('options'), limit, opt_indent, man=(display.verbosity == 0))
if doc.get('attributes', False):
text.append("")
text.append(_format("ATTRIBUTES:", 'bold'))
for k in doc['attributes'].keys():
text.append('')
text.append(DocCLI.warp_fill(DocCLI.tty_ify(_format('%s:' % k, 'UNDERLINE')), limit - 6, initial_indent=opt_indent,
subsequent_indent=opt_indent))
text.append(DocCLI._indent_lines(DocCLI._dump_yaml(doc['attributes'][k]), opt_indent))
del doc['attributes']
if doc.get('notes', False):
text.append("")
text.append(_format("NOTES:", 'bold'))
for note in doc['notes']:
text.append(DocCLI.warp_fill(DocCLI.tty_ify(note), limit - 6,
initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
del doc['notes']
if doc.get('seealso', False):
text.append("")
text.append(_format("SEE ALSO:", 'bold'))
DocCLI._add_seealso(text, doc['seealso'], limit=limit, opt_indent=opt_indent)
del doc['seealso']
if doc.get('requirements', False):
text.append('')
req = ", ".join(doc.pop('requirements'))
text.append(_format("REQUIREMENTS:", 'bold') + "%s\n" % DocCLI.warp_fill(DocCLI.tty_ify(req), limit - 16, initial_indent=" ",
subsequent_indent=opt_indent))
# Generic handler
for k in sorted(doc):
if not doc[k] or k in DocCLI.IGNORE:
continue
text.append('')
header = _format(k.upper(), 'bold')
if isinstance(doc[k], str):
text.append('%s: %s' % (header, DocCLI.warp_fill(DocCLI.tty_ify(doc[k]), limit - (len(k) + 2), subsequent_indent=opt_indent)))
elif isinstance(doc[k], (list, tuple)):
text.append('%s: %s' % (header, ', '.join(doc[k])))
else:
# use empty indent since this affects the start of the yaml doc, not it's keys
text.append('%s: ' % header + DocCLI._indent_lines(DocCLI._dump_yaml(doc[k]), ' ' * (len(k) + 2)))
del doc[k]
if doc.get('plainexamples', False):
text.append('')
text.append(_format("EXAMPLES:", 'bold'))
if isinstance(doc['plainexamples'], str):
text.append(doc.pop('plainexamples').strip())
else:
try:
text.append(yaml_dump(doc.pop('plainexamples'), indent=2, default_flow_style=False))
except Exception as ex:
raise AnsibleParserError("Unable to parse examples section.") from ex
if doc.get('returndocs', False):
text.append('')
text.append(_format("RETURN VALUES:", 'bold'))
DocCLI.add_fields(text, doc.pop('returndocs'), limit, opt_indent, return_values=True, man=(display.verbosity == 0))
text.append('\n')
return "\n".join(text)
def _do_yaml_snippet(doc):
text = []
mdesc = DocCLI.tty_ify(doc['short_description'])
module = doc.get('module')
if module:
# this is actually a usable task!
text.append("- name: %s" % (mdesc))
text.append(" %s:" % (module))
else:
# just a comment, hopefully useful yaml file
text.append("# %s:" % doc.get('plugin', doc.get('name')))
pad = 29
subdent = '# '.rjust(pad + 2)
limit = display.columns - pad
for o in sorted(doc['options'].keys()):
opt = doc['options'][o]
if isinstance(opt['description'], str):
desc = DocCLI.tty_ify(opt['description'])
else:
desc = DocCLI.tty_ify(" ".join(opt['description']))
required = opt.get('required', False)
if not isinstance(required, bool):
raise ValueError("Incorrect value for 'Required', a boolean is needed: %s" % required)
o = '%s:' % o
if module:
if required:
desc = "(required) %s" % desc
text.append(" %-20s # %s" % (o, DocCLI.warp_fill(desc, limit, subsequent_indent=subdent)))
else:
if required:
default = '(required)'
else:
default = opt.get('default', 'None')
text.append("%s %-9s # %s" % (o, default, DocCLI.warp_fill(desc, limit, subsequent_indent=subdent, max_lines=3)))
return text
def _do_lookup_snippet(doc):
text = []
snippet = "lookup('%s', " % doc.get('plugin', doc.get('name'))
comment = []
for o in sorted(doc['options'].keys()):
opt = doc['options'][o]
comment.append('# %s(%s): %s' % (o, opt.get('type', 'string'), opt.get('description', '')))
if o in ('_terms', '_raw', '_list'):
# these are 'list of arguments'
snippet += '< %s >' % (o)
continue
required = opt.get('required', False)
if not isinstance(required, bool):
raise ValueError("Incorrect value for 'Required', a boolean is needed: %s" % required)
if required:
default = '<REQUIRED>'
else:
default = opt.get('default', 'None')
if opt.get('type') in ('string', 'str'):
snippet += ", %s='%s'" % (o, default)
else:
snippet += ', %s=%s' % (o, default)
snippet += ")"
if comment:
text.extend(comment)
text.append('')
text.append(snippet)
return text
def main(args=None):
DocCLI.cli_executor(args)
if __name__ == '__main__':
main()
| DocCLI |
python | sphinx-doc__sphinx | sphinx/ext/inheritance_diagram.py | {
"start": 12658,
"end": 19049
} | class ____(SphinxDirective):
"""Run when the inheritance_diagram directive is first encountered."""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec: ClassVar[OptionSpec] = {
'parts': int,
'private-bases': directives.flag,
'caption': directives.unchanged,
'top-classes': directives.unchanged_required,
'include-subclasses': directives.flag,
}
def run(self) -> list[Node]:
node = inheritance_diagram()
node.document = self.state.document
class_names = self.arguments[0].split()
class_role = self.env.domains.python_domain.role('class')
# Store the original content for use as a hash
node['parts'] = self.options.get('parts', 0)
node['content'] = ', '.join(class_names)
node['top-classes'] = frozenset({
cls_stripped
for cls in self.options.get('top-classes', '').split(',')
if (cls_stripped := cls.strip())
})
# Create a graph starting with the list of classes
try:
graph = InheritanceGraph(
class_names,
self.env.ref_context.get('py:module'), # type: ignore[arg-type]
parts=node['parts'],
private_bases='private-bases' in self.options,
aliases=self.config.inheritance_alias,
top_classes=node['top-classes'],
include_subclasses='include-subclasses' in self.options,
)
except InheritanceException as err:
return [node.document.reporter.warning(err, line=self.lineno)]
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, _x = class_role( # type: ignore[misc]
'class', f':class:`{name}`', name, 0, self.state.inliner
)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
if 'caption' not in self.options:
self.add_name(node)
return [node]
else:
figure = figure_wrapper(self, node, self.options['caption'])
self.add_name(figure)
return [figure]
def _subclasses(cls: type[Any]) -> Iterator[type[Any]]:
yield cls
for sub_cls in cls.__subclasses__():
yield from _subclasses(sub_cls)
def get_graph_hash(node: inheritance_diagram) -> str:
encoded = (node['content'] + str(node['parts'])).encode()
return hashlib.md5(encoded, usedforsecurity=False).hexdigest()[-10:]
def html_visit_inheritance_diagram(
self: HTML5Translator, node: inheritance_diagram
) -> None:
"""Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node['graph']
graph_hash = get_graph_hash(node)
name = 'inheritance%s' % graph_hash
# Create a mapping from fully-qualified class names to URLs.
graphviz_output_format = self.config.graphviz_output_format.upper()
current_filename = os.path.basename(
self.builder.current_docname + self.builder.out_suffix
)
urls = {}
pending_xrefs = cast('Iterable[addnodes.pending_xref]', node)
for child in pending_xrefs:
if child.get('refuri') is not None:
# Construct the name from the URI if the reference is external via intersphinx
if not child.get('internal', True):
refname = child['refuri'].rsplit('#', 1)[-1]
else:
refname = child['reftitle']
urls[refname] = child.get('refuri')
elif child.get('refid') is not None:
if graphviz_output_format == 'SVG':
urls[child['reftitle']] = current_filename + '#' + child.get('refid')
else:
urls[child['reftitle']] = '#' + child.get('refid')
dotcode = graph._generate_dot(name, urls, config=self.config)
render_dot_html(
self,
node,
dotcode,
{},
'inheritance',
'inheritance',
alt='Inheritance diagram of ' + node['content'],
)
raise nodes.SkipNode
def latex_visit_inheritance_diagram(
self: LaTeXTranslator, node: inheritance_diagram
) -> None:
"""Output the graph for LaTeX. This will insert a PDF."""
graph = node['graph']
graph_hash = get_graph_hash(node)
name = 'inheritance%s' % graph_hash
dotcode = graph._generate_dot(
name, config=self.config, graph_attrs={'size': '"6.0,6.0"'}
)
render_dot_latex(self, node, dotcode, {}, 'inheritance')
raise nodes.SkipNode
def texinfo_visit_inheritance_diagram(
self: TexinfoTranslator,
node: inheritance_diagram,
) -> None:
"""Output the graph for Texinfo. This will insert a PNG."""
graph = node['graph']
graph_hash = get_graph_hash(node)
name = 'inheritance%s' % graph_hash
dotcode = graph._generate_dot(
name, config=self.config, graph_attrs={'size': '"6.0,6.0"'}
)
render_dot_texinfo(self, node, dotcode, {}, 'inheritance')
raise nodes.SkipNode
def skip(self: nodes.NodeVisitor, node: inheritance_diagram) -> None:
raise nodes.SkipNode
def setup(app: Sphinx) -> ExtensionMetadata:
app.setup_extension('sphinx.ext.graphviz')
app.add_node(
inheritance_diagram,
latex=(latex_visit_inheritance_diagram, None),
html=(html_visit_inheritance_diagram, None),
text=(skip, None),
man=(skip, None),
texinfo=(texinfo_visit_inheritance_diagram, None),
)
app.add_directive('inheritance-diagram', InheritanceDiagram)
app.add_config_value('inheritance_graph_attrs', {}, '', types=frozenset({dict}))
app.add_config_value('inheritance_node_attrs', {}, '', types=frozenset({dict}))
app.add_config_value('inheritance_edge_attrs', {}, '', types=frozenset({dict}))
app.add_config_value('inheritance_alias', {}, '', types=frozenset({dict}))
return {
'version': sphinx.__display_version__,
'parallel_read_safe': True,
}
| InheritanceDiagram |
python | huggingface__transformers | tests/trainer/test_trainer_utils.py | {
"start": 2798,
"end": 23191
} | class ____(unittest.TestCase):
def test_label_smoothing(self):
epsilon = 0.1
num_labels = 12
random_logits = torch.randn(4, 5, num_labels)
random_labels = torch.randint(0, num_labels, (4, 5))
loss = nn.functional.cross_entropy(random_logits.view(-1, num_labels), random_labels.view(-1))
model_output = SequenceClassifierOutput(logits=random_logits)
label_smoothed_loss = LabelSmoother(0.1)(model_output, random_labels)
log_probs = -nn.functional.log_softmax(random_logits, dim=-1)
expected_loss = (1 - epsilon) * loss + epsilon * log_probs.mean()
torch.testing.assert_close(label_smoothed_loss, expected_loss)
# With a few -100 labels
random_labels[0, 1] = -100
random_labels[2, 1] = -100
random_labels[2, 3] = -100
loss = nn.functional.cross_entropy(random_logits.view(-1, num_labels), random_labels.view(-1))
model_output = SequenceClassifierOutput(logits=random_logits)
label_smoothed_loss = LabelSmoother(0.1)(model_output, random_labels)
log_probs = -nn.functional.log_softmax(random_logits, dim=-1)
# Mask the log probs with the -100 labels
log_probs[0, 1] = 0.0
log_probs[2, 1] = 0.0
log_probs[2, 3] = 0.0
expected_loss = (1 - epsilon) * loss + epsilon * log_probs.sum() / (num_labels * 17)
torch.testing.assert_close(label_smoothed_loss, expected_loss)
def test_group_by_length(self):
# Get some inputs of random lengths
lengths = torch.randint(0, 25, (100,)).tolist()
# Put one bigger than the others to check it ends up in first position
lengths[32] = 50
indices = list(LengthGroupedSampler(4, lengths=lengths))
# The biggest element should be first
self.assertEqual(lengths[indices[0]], 50)
# The indices should be a permutation of range(100)
self.assertEqual(sorted(indices), list(range(100)))
def test_group_by_length_with_dict(self):
# Get some inputs of random lengths
data = []
for _ in range(6):
input_ids = torch.randint(0, 25, (100,)).tolist()
data.append({"input_ids": input_ids})
# Put one bigger than the others to check it ends up in first position
data[3]["input_ids"] = torch.randint(0, 25, (105,)).tolist()
indices = list(LengthGroupedSampler(4, dataset=data))
# The biggest element should be first
self.assertEqual(len(data[indices[0]]["input_ids"]), 105)
# The indices should be a permutation of range(6)
self.assertEqual(sorted(indices), list(range(6)))
def test_group_by_length_with_batch_encoding(self):
# Get some inputs of random lengths
data = []
for _ in range(6):
input_ids = torch.randint(0, 25, (100,)).tolist()
data.append(BatchEncoding({"input_ids": input_ids}))
# Put one bigger than the others to check it ends up in first position
data[3]["input_ids"] = torch.randint(0, 25, (105,)).tolist()
indices = list(LengthGroupedSampler(4, dataset=data))
# The biggest element should be first
self.assertEqual(len(data[indices[0]]["input_ids"]), 105)
# The indices should be a permutation of range(6)
self.assertEqual(sorted(indices), list(range(6)))
def test_distributed_length_grouped(self):
# Get some inputs of random lengths
lengths = torch.randint(0, 25, (100,)).tolist()
# Put one bigger than the others to check it ends up in first position
lengths[32] = 50
indices_process_0 = list(DistributedLengthGroupedSampler(4, num_replicas=2, rank=0, lengths=lengths))
indices_process_1 = list(DistributedLengthGroupedSampler(4, num_replicas=2, rank=1, lengths=lengths))
# The biggest element should be first
self.assertEqual(lengths[indices_process_0[0]], 50)
# The indices should be a permutation of range(100)
self.assertEqual(sorted(indices_process_0 + indices_process_1), list(range(100)))
def test_get_parameter_names(self):
model = nn.Sequential(TstLayer(128), nn.ModuleList([TstLayer(128), TstLayer(128)]))
# fmt: off
self.assertEqual(
get_parameter_names(model, [nn.LayerNorm]),
['0.linear1.weight', '0.linear1.bias', '0.linear2.weight', '0.linear2.bias', '0.bias', '1.0.linear1.weight', '1.0.linear1.bias', '1.0.linear2.weight', '1.0.linear2.bias', '1.0.bias', '1.1.linear1.weight', '1.1.linear1.bias', '1.1.linear2.weight', '1.1.linear2.bias', '1.1.bias']
)
# fmt: on
def test_get_parameter_names_rmsnorm(self):
class RMSNorm(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
class ModelWithRMSNorm(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(128, 128)
self.rmsnorm = RMSNorm(128)
self.bias = nn.Parameter(torch.zeros(128))
model = ModelWithRMSNorm()
# Test both type-based and name-based filtering
decay_parameters = get_parameter_names(model, [], ["bias", "rmsnorm"])
# Parameters that should be in weight decay
self.assertIn("linear.weight", decay_parameters)
# Parameters that should NOT be in weight decay
self.assertNotIn("linear.bias", decay_parameters)
self.assertNotIn("rmsnorm.weight", decay_parameters)
self.assertNotIn("rmsnorm.bias", decay_parameters)
self.assertNotIn("bias", decay_parameters)
def test_distributed_sampler_with_loop(self):
batch_size = 16
for length in [23, 64, 123]:
dataset = list(range(length))
shard1 = DistributedSamplerWithLoop(dataset, batch_size, num_replicas=2, rank=0)
shard2 = DistributedSamplerWithLoop(dataset, batch_size, num_replicas=2, rank=1)
# Set seeds
shard1.set_epoch(0)
shard2.set_epoch(0)
# Sample
samples1 = list(shard1)
samples2 = list(shard2)
self.assertTrue(len(samples1) % batch_size == 0)
self.assertTrue(len(samples2) % batch_size == 0)
total = []
for sample1, sample2 in zip(samples1, samples2):
total += [sample1, sample2]
self.assertEqual(set(total[:length]), set(dataset))
self.assertEqual(set(total[length:]), set(total[: (len(total) - length)]))
def check_iterable_dataset_shard(self, dataset, batch_size, drop_last, num_processes=2, epoch=0):
# Set the seed for the base dataset to get the proper reference.
dataset.generator.manual_seed(epoch)
reference = list(dataset)
shards = [
IterableDatasetShard(
dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i
)
for i in range(num_processes)
]
for shard in shards:
shard.set_epoch(epoch)
shard_lists = [list(shard) for shard in shards]
for shard in shard_lists:
# All shards have a number of samples that is a round multiple of batch size
self.assertTrue(len(shard) % batch_size == 0)
# All shards have the same number of samples
self.assertEqual(len(shard), len(shard_lists[0]))
for shard in shards:
# All shards know the total number of samples
self.assertEqual(shard.num_examples, len(reference))
observed = []
for idx in range(0, len(shard_lists[0]), batch_size):
for shard in shard_lists:
observed += shard[idx : idx + batch_size]
# If drop_last is False we loop through samples at the beginning to have a size that is a round multiple of
# batch_size
if not drop_last:
while len(reference) < len(observed):
reference += reference
self.assertListEqual(observed, reference[: len(observed)])
# Check equivalence between IterableDataset and ShardSampler
dataset.generator.manual_seed(epoch)
reference = list(dataset)
sampler_shards = [
ShardSampler(
reference, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i
)
for i in range(num_processes)
]
for shard, sampler_shard in zip(shard_lists, sampler_shards):
self.assertListEqual(shard, list(sampler_shard))
def test_iterable_dataset_shard(self):
dataset = RandomIterableDataset()
self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=2, epoch=0)
self.check_iterable_dataset_shard(dataset, 4, drop_last=False, num_processes=2, epoch=0)
self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=3, epoch=42)
self.check_iterable_dataset_shard(dataset, 4, drop_last=False, num_processes=3, epoch=42)
def test_iterable_dataset_shard_with_length(self):
sampler_shards = [
IterableDatasetShard(list(range(100)), batch_size=4, drop_last=True, num_processes=2, process_index=i)
for i in range(2)
]
# Build expected shards: each process will have batches of size 4 until there is not enough elements to
# form two full batches (so we stop at 96 = (100 // (4 * 2)) * 4)
expected_shards = [[], []]
current_shard = 0
for i in range(0, 96, 4):
expected_shards[current_shard].extend(list(range(i, i + 4)))
current_shard = 1 - current_shard
self.assertListEqual([list(shard) for shard in sampler_shards], expected_shards)
self.assertListEqual([len(shard) for shard in sampler_shards], [len(shard) for shard in expected_shards])
sampler_shards = [
IterableDatasetShard(list(range(100)), batch_size=4, drop_last=False, num_processes=2, process_index=i)
for i in range(2)
]
# When drop_last=False, we get two last full batches by looping back to the beginning.
expected_shards[0].extend(list(range(96, 100)))
expected_shards[1].extend(list(range(0, 4)))
self.assertListEqual([list(shard) for shard in sampler_shards], expected_shards)
self.assertListEqual([len(shard) for shard in sampler_shards], [len(shard) for shard in expected_shards])
def check_shard_sampler(self, dataset, batch_size, drop_last, num_processes=2):
shards = [
ShardSampler(
dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i
)
for i in range(num_processes)
]
shard_lists = [list(shard) for shard in shards]
for shard in shard_lists:
# All shards have a number of samples that is a round multiple of batch size
self.assertTrue(len(shard) % batch_size == 0)
# All shards have the same number of samples
self.assertEqual(len(shard), len(shard_lists[0]))
observed = []
for idx in range(0, len(shard_lists[0]), batch_size):
for shard in shard_lists:
observed += shard[idx : idx + batch_size]
# If drop_last is False we loop through samples at the beginning to have a size that is a round multiple of
# batch_size
reference = copy.copy(dataset)
if not drop_last:
while len(reference) < len(observed):
reference += reference
self.assertListEqual(observed, reference[: len(observed)])
def test_shard_sampler(self):
for n_elements in [64, 123]:
dataset = list(range(n_elements))
self.check_shard_sampler(dataset, 4, drop_last=True, num_processes=2)
self.check_shard_sampler(dataset, 4, drop_last=False, num_processes=2)
self.check_shard_sampler(dataset, 4, drop_last=True, num_processes=3)
self.check_shard_sampler(dataset, 4, drop_last=False, num_processes=3)
@require_accelerate
def test_executable_batch_size(self):
batch_sizes = []
@find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=True)
def mock_training_loop_function(batch_size):
nonlocal batch_sizes
batch_sizes.append(batch_size)
if batch_size > 16:
raise RuntimeError("CUDA out of memory.")
mock_training_loop_function()
self.assertEqual(batch_sizes, [64, 57, 51, 45, 40, 36, 32, 28, 25, 22, 19, 17, 15])
@require_accelerate
def test_executable_batch_size_no_search(self):
batch_sizes = []
@find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=False)
def mock_training_loop_function(batch_size):
nonlocal batch_sizes
batch_sizes.append(batch_size)
mock_training_loop_function()
self.assertEqual(batch_sizes, [64])
@require_accelerate
def test_executable_batch_size_with_error(self):
@find_executable_batch_size(starting_batch_size=64, auto_find_batch_size=False)
def mock_training_loop_function(batch_size):
raise RuntimeError("CUDA out of memory.")
with self.assertRaises(RuntimeError) as cm:
mock_training_loop_function()
self.assertEqual("CUDA out of memory", cm.args[0])
def test_pad_and_concatenate_with_1d(self):
"""Tests whether pad_and_concatenate works with scalars."""
array1 = 1.0
array2 = 2.0
result = numpy_pad_and_concatenate(array1, array2)
self.assertTrue(np.array_equal(np.array([1.0, 2.0]), result))
tensor1 = torch.tensor(1.0)
tensor2 = torch.tensor(2.0)
result = torch_pad_and_concatenate(tensor1, tensor2)
self.assertTrue(torch.equal(result, torch.Tensor([1.0, 2.0])))
def test_remove_columns_collator(self):
class MockLogger:
def __init__(self) -> None:
self.called = 0
def info(self, msg):
self.called += 1
self.last_msg = msg
data_batch = [
{"col1": 1, "col2": 2, "col3": 3},
{"col1": 1, "col2": 2, "col3": 3},
]
logger = MockLogger()
remove_columns_collator = RemoveColumnsCollator(
default_data_collator, ["col1", "col2"], logger, "model", "training"
)
self.assertNotIn("col3", remove_columns_collator(data_batch))
# check that the logging message is printed out only once
remove_columns_collator(data_batch)
remove_columns_collator(data_batch)
self.assertEqual(logger.called, 1)
self.assertIn("col3", logger.last_msg)
def test_eval_loop_container(self):
batch_1 = [
torch.ones([8, 5]),
{"loss": torch.tensor(1.0)},
(torch.ones([8, 2, 3]), torch.ones([8, 2])),
]
batch_2 = [
torch.ones([4, 5]),
{"loss": torch.tensor(2.0)},
(torch.ones([4, 2, 3]), torch.ones([4, 6])),
]
concat_container = EvalLoopContainer(do_nested_concat=True, padding_index=-100)
concat_container.add(batch_1)
concat_container.add(batch_2)
concat_container.to_cpu_and_numpy()
arrays = concat_container.get_arrays()
# Test two nested batches concatenation
self.assertIsInstance(arrays, list)
self.assertEqual(len(arrays), 3)
self.assertIsInstance(arrays[0], np.ndarray)
self.assertEqual(arrays[0].shape, (12, 5))
self.assertIsInstance(arrays[1], dict)
self.assertIsInstance(arrays[1]["loss"], np.ndarray)
self.assertEqual(arrays[1]["loss"].shape, (2,))
self.assertTrue(np.allclose(arrays[1]["loss"], np.array([1.0, 2.0])))
self.assertIsInstance(arrays[2], tuple)
self.assertEqual(len(arrays[2]), 2)
self.assertEqual(arrays[2][0].shape, (12, 2, 3))
self.assertEqual(arrays[2][1].shape, (12, 6))
# check that first batch padded with padding index -100 after concatenation
self.assertEqual(arrays[2][1][0][2], -100)
# Test two batches with no concatenation
list_container = EvalLoopContainer(do_nested_concat=False)
list_container.add(batch_1)
list_container.add(batch_2)
list_container.to_cpu_and_numpy()
arrays = list_container.get_arrays()
self.assertEqual(len(arrays), 2)
self.assertIsInstance(arrays, list)
np_batch_1, np_batch_2 = arrays
self.assertIsInstance(np_batch_1, list)
self.assertEqual(len(np_batch_1), 3)
self.assertIsInstance(np_batch_1[0], np.ndarray)
self.assertIsInstance(np_batch_1[1], dict)
self.assertIsInstance(np_batch_1[2], tuple)
self.assertEqual(np_batch_1[0].shape, (8, 5))
self.assertEqual(np_batch_1[1]["loss"].shape, ())
self.assertEqual(np_batch_1[2][0].shape, (8, 2, 3))
self.assertEqual(np_batch_1[2][1].shape, (8, 2))
self.assertIsInstance(np_batch_2, list)
self.assertEqual(len(np_batch_2), 3)
self.assertIsInstance(np_batch_2[0], np.ndarray)
self.assertIsInstance(np_batch_2[1], dict)
self.assertIsInstance(np_batch_2[2], tuple)
self.assertEqual(np_batch_2[0].shape, (4, 5))
self.assertEqual(np_batch_2[1]["loss"].shape, ())
self.assertEqual(np_batch_2[2][0].shape, (4, 2, 3))
self.assertEqual(np_batch_2[2][1].shape, (4, 6))
# Test no batches
none_arr = EvalLoopContainer(do_nested_concat=True, padding_index=-100).get_arrays()
self.assertIsNone(none_arr)
none_arr = EvalLoopContainer(do_nested_concat=False).get_arrays()
self.assertIsNone(none_arr)
# Test one batch
concat_container = EvalLoopContainer(do_nested_concat=True, padding_index=-100)
concat_container.add(batch_1)
arrays = concat_container.get_arrays()
self.assertIsInstance(arrays, list)
self.assertEqual(len(arrays), 3)
self.assertIsInstance(arrays[0], np.ndarray)
self.assertEqual(arrays[0].shape, (8, 5))
self.assertIsInstance(arrays[1], dict)
self.assertIsInstance(arrays[1]["loss"], np.ndarray)
self.assertEqual(arrays[1]["loss"].shape, ())
self.assertTrue(np.allclose(arrays[1]["loss"], np.array([1.0])))
self.assertIsInstance(arrays[2], tuple)
self.assertEqual(len(arrays[2]), 2)
self.assertEqual(arrays[2][0].shape, (8, 2, 3))
self.assertEqual(arrays[2][1].shape, (8, 2))
def test_label_smoothing_multi_label_incompatibility(self):
"""Test that Trainer warns and disables label smoothing for multi-label classification"""
# Mock model config with multi-label classification
class MockConfig:
problem_type = "multi_label_classification"
class MockModel(nn.Module):
def __init__(self):
super().__init__()
self.config = MockConfig()
self.linear = nn.Linear(10, 3)
def forward(self, **kwargs):
return {"logits": torch.randn(2, 3)}
model = MockModel()
# Create training args with label smoothing
training_args = TrainingArguments(
output_dir="./test-trainer",
label_smoothing_factor=0.1,
per_device_train_batch_size=2,
num_train_epochs=1,
)
# Should warn and disable label smoothing
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
trainer = Trainer(model=model, args=training_args)
# Check warning was issued
self.assertEqual(len(w), 1)
self.assertIn("Label smoothing is not compatible with multi-label classification", str(w[0].message))
# Check label_smoother was disabled
self.assertIsNone(trainer.label_smoother)
| TrainerUtilsTest |
python | pytorch__pytorch | test/export/test_package.py | {
"start": 341,
"end": 2853
} | class ____(TestCase):
def test_basic(self):
def fn(x: torch.Tensor) -> torch.Tensor:
return x + 1
x = torch.randn(3, 2)
package = _ExportPackage()
self.assertEqual(
package._exporter("fn", fn)(x),
fn(x),
)
self.assertEqual(len(package.methods), 1)
self.assertEqual(len(package.methods["fn"].fallbacks), 1)
self.assertEqual(len(package.methods["fn"].overloads), 0)
def test_more_than_once(self):
def fn(x: torch.Tensor) -> torch.Tensor:
return x + 1
x = torch.randn(3, 2)
package = _ExportPackage()
exporter = package._exporter("fn", fn)
exporter(x)
with self.assertRaisesRegex(
RuntimeError,
"Cannot export .* more than once",
):
exporter(x)
def test_error(self):
def fn(x: torch.Tensor) -> torch.Tensor:
return x + 1
x = torch.randn(3, 2)
package = _ExportPackage()
exporter = package._exporter("fn", fn, fallback="error")
with self.assertRaisesRegex(
RuntimeError,
"Cannot export fallback .* when fallback policy is set to 'error'",
):
exporter(x)
def test_overloads(self):
class Module(torch.nn.Module):
def forward(self, x: torch.Tensor) -> torch.Tensor:
if x.shape[0] == 4:
return x + 1
elif x.shape[0] == 3:
return x - 1
else:
return x + 2
fn = Module()
x = torch.randn(3, 2)
x2 = torch.randn(4, 2)
x3 = torch.randn(5, 2)
def spec(self, x):
assert x.shape[0] == 3
def spec2(self, x):
assert x.shape[0] == 4
def spec3(self, x):
assert x.shape[0] >= 5
return {"x": (Dim("batch", min=5), Dim.STATIC)}
package = _ExportPackage()
exporter = (
package._exporter("fn", fn)
._define_overload("spec", spec)
._define_overload("spec2", spec2)
._define_overload("spec3", spec3)
)
self.assertEqual(exporter(x), x - 1)
self.assertEqual(exporter(x2), x2 + 1)
self.assertEqual(exporter(x3), x3 + 2)
self.assertEqual(len(package.methods), 1)
self.assertEqual(len(package.methods["fn"].overloads), 3)
if __name__ == "__main__":
run_tests()
| TestPackage |
python | scipy__scipy | scipy/optimize/tests/test_minpack.py | {
"start": 7106,
"end": 8509
} | class ____:
def test_pressure_network_no_gradient(self):
# root/hybr without gradient, equal pipes -> equal flows
k = np.full(4, 0.5)
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network, initial_guess,
method='hybr', args=(Qtot, k)).x
assert_array_almost_equal(final_flows, np.ones(4))
def test_pressure_network_with_gradient(self):
# root/hybr with gradient, equal pipes -> equal flows
k = np.full(4, 0.5)
Qtot = 4
initial_guess = array([[2., 0., 2., 0.]])
final_flows = optimize.root(pressure_network, initial_guess,
args=(Qtot, k), method='hybr',
jac=pressure_network_jacobian).x
assert_array_almost_equal(final_flows, np.ones(4))
def test_pressure_network_with_gradient_combined(self):
# root/hybr with gradient and function combined, equal pipes -> equal
# flows
k = np.full(4, 0.5)
Qtot = 4
initial_guess = array([2., 0., 2., 0.])
final_flows = optimize.root(pressure_network_fun_and_grad,
initial_guess, args=(Qtot, k),
method='hybr', jac=True).x
assert_array_almost_equal(final_flows, np.ones(4))
| TestRootHybr |
python | bokeh__bokeh | src/bokeh/models/annotations/dimensional.py | {
"start": 3319,
"end": 4203
} | class ____(Dimensional):
""" Model for defining metric units of measurement.
"""
# explicit __init__ to support Init signatures
def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
base_unit = Required(String, help="""
The short name of the base unit, e.g. ``"m"`` for meters or ``"eV"`` for electron volts.
""")
full_unit = Nullable(String, default=None, help="""
The full name of the base unit, e.g. ``"meter"`` or ``"electronvolt"``.
""")
ticks = Override(default=[1, 2, 5, 10, 15, 20, 25, 50, 75, 100, 125, 150, 200, 250, 500, 750])
def is_known(self, unit: str) -> bool:
prefixes = ["Q", "R", "Y", "Z", "E", "P", "T", "G", "M", "k", "h", "", "d", "c", "m", "µ", "n", "p", "f", "a", "z", "y", "r", "q"]
basis = {f"{prefix}{unit}" for prefix in prefixes}
return unit in basis
| Metric |
python | pytorch__pytorch | test/test_functional_optim.py | {
"start": 311,
"end": 686
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
torch.manual_seed(0)
self.lin1 = nn.Linear(3, 3, bias=False)
self.lin2 = nn.Linear(3, 3, bias=False)
def forward(self, t1):
return self.lin2(F.relu(self.lin1(t1)))
# dummy class to showcase custom optimizer registration with functional wrapper
| MyModule |
python | cython__cython | Cython/Compiler/Optimize.py | {
"start": 226982,
"end": 228260
} | class ____(Visitor.CythonTransform):
"""
This class facilitates the sharing of overflow checking among all nodes
of a nested arithmetic expression. For example, given the expression
a*b + c, where a, b, and x are all possibly overflowing ints, the entire
sequence will be evaluated and the overflow bit checked only at the end.
"""
overflow_bit_node = None
def visit_Node(self, node):
if self.overflow_bit_node is not None:
saved = self.overflow_bit_node
self.overflow_bit_node = None
self.visitchildren(node)
self.overflow_bit_node = saved
else:
self.visitchildren(node)
return node
def visit_NumBinopNode(self, node):
if node.overflow_check and node.overflow_fold:
top_level_overflow = self.overflow_bit_node is None
if top_level_overflow:
self.overflow_bit_node = node
else:
node.overflow_bit_node = self.overflow_bit_node
node.overflow_check = False
self.visitchildren(node)
if top_level_overflow:
self.overflow_bit_node = None
else:
self.visitchildren(node)
return node
| ConsolidateOverflowCheck |
python | huggingface__transformers | src/transformers/activations.py | {
"start": 7780,
"end": 8001
} | class ____(OrderedDict):
def __getitem__(self, key):
content = super().__getitem__(key)
cls, kwargs = content if isinstance(content, tuple) else (content, {})
return cls(**kwargs)
| ClassInstantier |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/ConnectorRegistryDestinationDefinition.py | {
"start": 8170,
"end": 10357
} | class ____(BaseModel):
class Config:
extra = Extra.allow
destinationDefinitionId: UUID
name: str
dockerRepository: str
dockerImageTag: str
documentationUrl: str
icon: Optional[str] = None
iconUrl: Optional[str] = None
spec: Dict[str, Any]
tombstone: Optional[bool] = Field(
False,
description="if false, the configuration is active. if true, then this configuration is permanently off.",
)
public: Optional[bool] = Field(
False,
description="true if this connector definition is available to all workspaces",
)
custom: Optional[bool] = Field(
False, description="whether this is a custom connector definition"
)
releaseStage: Optional[ReleaseStage] = None
supportLevel: Optional[SupportLevel] = None
releaseDate: Optional[date] = Field(
None,
description="The date when this connector was first released, in yyyy-mm-dd format.",
)
tags: Optional[List[str]] = Field(
None,
description="An array of tags that describe the connector. E.g: language:python, keyword:rds, etc.",
)
resourceRequirements: Optional[ActorDefinitionResourceRequirements] = None
protocolVersion: Optional[str] = Field(
None, description="the Airbyte Protocol version supported by the connector"
)
normalizationConfig: Optional[NormalizationDestinationDefinitionConfig] = None
supportsDbt: Optional[bool] = Field(
None,
description="an optional flag indicating whether DBT is used in the normalization. If the flag value is NULL - DBT is not used.",
)
allowedHosts: Optional[AllowedHosts] = None
releases: Optional[ConnectorRegistryReleases] = None
ab_internal: Optional[AirbyteInternal] = None
supportsRefreshes: Optional[bool] = False
supportsFileTransfer: Optional[bool] = False
supportsDataActivation: Optional[bool] = False
generated: Optional[GeneratedFields] = None
packageInfo: Optional[ConnectorPackageInfo] = None
language: Optional[str] = Field(
None, description="The language the connector is written in"
)
| ConnectorRegistryDestinationDefinition |
python | django__django | django/contrib/gis/gdal/raster/band.py | {
"start": 439,
"end": 7836
} | class ____(GDALRasterBase):
"""
Wrap a GDAL raster band, needs to be obtained from a GDALRaster object.
"""
def __init__(self, source, index):
self.source = source
self._ptr = capi.get_ds_raster_band(source._ptr, index)
def _flush(self):
"""
Call the flush method on the Band's parent raster and force a refresh
of the statistics attribute when requested the next time.
"""
self.source._flush()
self._stats_refresh = True
@property
def description(self):
"""
Return the description string of the band.
"""
return force_str(capi.get_band_description(self._ptr))
@property
def width(self):
"""
Width (X axis) in pixels of the band.
"""
return capi.get_band_xsize(self._ptr)
@property
def height(self):
"""
Height (Y axis) in pixels of the band.
"""
return capi.get_band_ysize(self._ptr)
@property
def pixel_count(self):
"""
Return the total number of pixels in this band.
"""
return self.width * self.height
_stats_refresh = False
def statistics(self, refresh=False, approximate=False):
"""
Compute statistics on the pixel values of this band.
The return value is a tuple with the following structure:
(minimum, maximum, mean, standard deviation).
If approximate=True, the statistics may be computed based on overviews
or a subset of image tiles.
If refresh=True, the statistics will be computed from the data
directly, and the cache will be updated where applicable.
For empty bands (where all pixel values are nodata), all statistics
values are returned as None.
For raster formats using Persistent Auxiliary Metadata (PAM) services,
the statistics might be cached in an auxiliary file.
"""
# Prepare array with arguments for capi function
smin, smax, smean, sstd = c_double(), c_double(), c_double(), c_double()
stats_args = [
self._ptr,
c_int(approximate),
byref(smin),
byref(smax),
byref(smean),
byref(sstd),
c_void_p(),
c_void_p(),
]
if refresh or self._stats_refresh:
func = capi.compute_band_statistics
else:
# Add additional argument to force computation if there is no
# existing PAM file to take the values from.
force = True
stats_args.insert(2, c_int(force))
func = capi.get_band_statistics
# Computation of statistics fails for empty bands.
try:
func(*stats_args)
result = smin.value, smax.value, smean.value, sstd.value
except GDALException:
result = (None, None, None, None)
self._stats_refresh = False
return result
@property
def min(self):
"""
Return the minimum pixel value for this band.
"""
return self.statistics()[0]
@property
def max(self):
"""
Return the maximum pixel value for this band.
"""
return self.statistics()[1]
@property
def mean(self):
"""
Return the mean of all pixel values of this band.
"""
return self.statistics()[2]
@property
def std(self):
"""
Return the standard deviation of all pixel values of this band.
"""
return self.statistics()[3]
@property
def nodata_value(self):
"""
Return the nodata value for this band, or None if it isn't set.
"""
# Get value and nodata exists flag
nodata_exists = c_int()
value = capi.get_band_nodata_value(self._ptr, nodata_exists)
if not nodata_exists:
value = None
# If the pixeltype is an integer, convert to int
elif self.datatype() in GDAL_INTEGER_TYPES:
value = int(value)
return value
@nodata_value.setter
def nodata_value(self, value):
"""
Set the nodata value for this band.
"""
if value is None:
capi.delete_band_nodata_value(self._ptr)
elif not isinstance(value, (int, float)):
raise ValueError("Nodata value must be numeric or None.")
else:
capi.set_band_nodata_value(self._ptr, value)
self._flush()
def datatype(self, as_string=False):
"""
Return the GDAL Pixel Datatype for this band.
"""
dtype = capi.get_band_datatype(self._ptr)
if as_string:
dtype = GDAL_PIXEL_TYPES[dtype]
return dtype
def color_interp(self, as_string=False):
"""Return the GDAL color interpretation for this band."""
color = capi.get_band_color_interp(self._ptr)
if as_string:
color = GDAL_COLOR_TYPES[color]
return color
def data(self, data=None, offset=None, size=None, shape=None, as_memoryview=False):
"""
Read or writes pixel values for this band. Blocks of data can
be accessed by specifying the width, height and offset of the
desired block. The same specification can be used to update
parts of a raster by providing an array of values.
Allowed input data types are bytes, memoryview, list, tuple, and array.
"""
offset = offset or (0, 0)
size = size or (self.width - offset[0], self.height - offset[1])
shape = shape or size
if any(x <= 0 for x in size):
raise ValueError("Offset too big for this raster.")
if size[0] > self.width or size[1] > self.height:
raise ValueError("Size is larger than raster.")
# Create ctypes type array generator
ctypes_array = GDAL_TO_CTYPES[self.datatype()] * (shape[0] * shape[1])
if data is None:
# Set read mode
access_flag = 0
# Prepare empty ctypes array
data_array = ctypes_array()
else:
# Set write mode
access_flag = 1
# Instantiate ctypes array holding the input data
if isinstance(data, (bytes, memoryview)) or (
numpy and isinstance(data, numpy.ndarray)
):
data_array = ctypes_array.from_buffer_copy(data)
else:
data_array = ctypes_array(*data)
# Access band
capi.band_io(
self._ptr,
access_flag,
offset[0],
offset[1],
size[0],
size[1],
byref(data_array),
shape[0],
shape[1],
self.datatype(),
0,
0,
)
# Return data as numpy array if possible, otherwise as list
if data is None:
if as_memoryview:
return memoryview(data_array)
elif numpy:
# reshape() needs a reshape parameter with the height first.
return numpy.frombuffer(
data_array, dtype=numpy.dtype(data_array)
).reshape(tuple(reversed(size)))
else:
return list(data_array)
else:
self._flush()
| GDALBand |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-consecutive-values-you-can-make.py | {
"start": 33,
"end": 335
} | class ____(object):
def getMaximumConsecutive(self, coins):
"""
:type coins: List[int]
:rtype: int
"""
coins.sort()
result = 1
for c in coins:
if c > result:
break
result += c
return result
| Solution |
python | getsentry__sentry | src/sentry/api/endpoints/artifact_lookup.py | {
"start": 1817,
"end": 11046
} | class ____(ProjectEndpoint):
owner = ApiOwner.OWNERS_INGEST
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
permission_classes = (ProjectReleasePermission,)
def download_file(self, download_id, project: Project):
split = download_id.split("/")
if len(split) < 2:
return HttpResponseBadRequest(content=b"Invalid download ID")
ty, ty_id, *_rest = split
try:
ty_id = int(ty_id)
except ValueError:
return HttpResponseBadRequest(content=b"Invalid download ID")
rate_limited = ratelimits.backend.is_limited(
project=project,
key=f"rl:ArtifactLookupEndpoint:download:{download_id}:{project.id}",
limit=10,
)
if rate_limited:
logger.info(
"notification.rate_limited",
extra={"project_id": project.id, "file_id": download_id},
)
return HttpResponse({"Too many download requests"}, status=429)
file_m: ArtifactBundle | ReleaseFile | None = None
if ty == "artifact_bundle":
file_m = (
ArtifactBundle.objects.filter(
id=ty_id,
projectartifactbundle__project_id=project.id,
)
.select_related("file")
.first()
)
metrics.incr("sourcemaps.download.artifact_bundle")
elif ty == "release_file":
# NOTE: `ReleaseFile` does have a `project_id`, but that seems to
# be always empty, so using the `organization_id` instead.
file_m = (
ReleaseFile.objects.filter(id=ty_id, organization_id=project.organization.id)
.select_related("file")
.first()
)
metrics.incr("sourcemaps.download.release_file")
if file_m is None:
raise Http404
file = file_m.file
try:
fp = file.getfile()
response = StreamingHttpResponse(
iter(lambda: fp.read(4096), b""), content_type="application/octet-stream"
)
response["Content-Length"] = file.size
response["Content-Disposition"] = f'attachment; filename="{file.name}"'
return response
except OSError:
raise Http404
def get(self, request: Request, project: Project) -> Response:
"""
List a Project's Individual Artifacts or Bundles
````````````````````````````````````````
Retrieve a list of individual artifacts or artifact bundles for a given project.
:pparam string organization_id_or_slug: the id or slug of the organization to query.
:pparam string project_id_or_slug: the id or slug of the project to query.
:qparam string debug_id: if set, will query and return the artifact
bundle that matches the given `debug_id`.
:qparam string url: if set, will query and return all the individual
artifacts, or artifact bundles that contain files
that match the `url`. This is using a substring-match.
:qparam string release: used in conjunction with `url`.
:qparam string dist: used in conjunction with `url`.
:auth: required
"""
if (download_id := request.GET.get("download")) is not None:
if has_download_permission(request, project):
return self.download_file(download_id, project)
else:
return Response(status=403)
debug_id = request.GET.get("debug_id")
try:
debug_id = normalize_debug_id(debug_id)
except SymbolicError:
pass
url = request.GET.get("url") or NULL_STRING
release_name = request.GET.get("release") or NULL_STRING
dist_name = request.GET.get("dist") or NULL_STRING
# First query all the files:
# We first do that using the `ArtifactBundle` infrastructure.
artifact_bundles = query_artifact_bundles_containing_file(
project, release_name, dist_name, url, debug_id
)
all_bundles: dict[str, str] = {
f"artifact_bundle/{bundle_id}": resolved for bundle_id, resolved in artifact_bundles
}
# If no `ArtifactBundle`s were found matching the file, we fall back to
# looking up the file using the legacy `ReleaseFile` infrastructure.
individual_files: list[ReleaseFile] = []
if not artifact_bundles:
release, dist = try_resolve_release_dist(project, release_name, dist_name)
if release:
metrics.incr("sourcemaps.lookup.release_file")
releasefile_ids = list(get_legacy_release_bundles(release, dist))
for releasefile_id in releasefile_ids:
all_bundles[f"release_file/{releasefile_id}"] = "release-old"
individual_files = list(get_legacy_releasefile_by_file_url(release, dist, url))
maybe_renew_releasefiles(individual_files)
if releasefile_ids:
renew_releasefiles_by_id(releasefile_ids)
# Then: Construct our response
url_constructor = UrlConstructor(request, project)
found_artifacts: list[_Artifact] = []
for download_id, resolved_with in all_bundles.items():
found_artifacts.append(
{
"id": download_id,
"type": "bundle",
"url": url_constructor.url_for_file_id(download_id),
"resolved_with": resolved_with,
}
)
for release_file in individual_files:
download_id = f"release_file/{release_file.id}"
found_artifacts.append(
{
"id": download_id,
"type": "file",
"url": url_constructor.url_for_file_id(download_id),
# The `name` is the url/abs_path of the file,
# as in: `"~/path/to/file.min.js"`.
"abs_path": release_file.name,
# These headers should ideally include the `Sourcemap` reference
"headers": release_file.file.headers,
"resolved_with": "release-old",
}
)
# make sure we have a stable sort order for tests
def natural_sort(key: str) -> tuple[str, int]:
split = key.split("/")
if len(split) > 1:
ty, ty_id = split
return (ty, int(ty_id))
else:
return ("", int(split[0]))
found_artifacts.sort(key=lambda x: natural_sort(x["id"]))
# NOTE: We do not paginate this response, as we have very tight limits on all the individual queries.
return Response(serialize(found_artifacts, request.user))
def try_resolve_release_dist(
project: Project, release_name: str, dist_name: str
) -> tuple[Release | None, Distribution | None]:
release = None
dist = None
try:
release = Release.objects.get(
organization_id=project.organization_id,
projects=project,
version=release_name,
)
# We cannot query for dist without a release anyway
if dist_name:
dist = Distribution.objects.get(release=release, name=dist_name)
except (Release.DoesNotExist, Distribution.DoesNotExist):
pass
except Exception:
logger.exception("Failed to read")
return release, dist
def get_legacy_release_bundles(release: Release, dist: Distribution | None) -> set[int]:
return set(
ReleaseFile.objects.filter(
release_id=release.id,
dist_id=dist.id if dist else None,
# a `ReleaseFile` with `0` artifacts represents a release archive,
# see the comment above the definition of `artifact_count`.
artifact_count=0,
# similarly the special `type` is also used for release archives.
file__type=RELEASE_BUNDLE_TYPE,
).values_list("id", flat=True)
# TODO: this `order_by` might be incredibly slow
# we want to have a hard limit on the returned bundles here. and we would
# want to pick the most recently uploaded ones. that should mostly be
# relevant for customers that upload multiple bundles, or are uploading
# newer files for existing releases. In that case the symbolication is
# already degraded, so meh...
# .order_by("-file__timestamp")
[:MAX_BUNDLES_QUERY]
)
def get_legacy_releasefile_by_file_url(
release: Release, dist: Distribution | None, url: str
) -> QuerySet[ReleaseFile]:
# Exclude files which are also present in archive:
return (
ReleaseFile.public_objects.filter(
release_id=release.id,
dist_id=dist.id if dist else None,
)
.exclude(artifact_count=0)
.select_related("file")
).filter(name__icontains=url)[:MAX_RELEASEFILES_QUERY]
| ProjectArtifactLookupEndpoint |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/partition_status_cache.py | {
"start": 1609,
"end": 2412
} | class ____(Enum):
"""The status of asset partition."""
MATERIALIZED = "MATERIALIZED"
IN_PROGRESS = "IN_PROGRESS"
FAILED = "FAILED"
def is_cacheable_partition_type(partitions_def: PartitionsDefinition) -> bool:
check.inst_param(partitions_def, "partitions_def", PartitionsDefinition)
if not isinstance(partitions_def, CACHEABLE_PARTITION_TYPES):
return False
if isinstance(partitions_def, MultiPartitionsDefinition):
return all(
is_cacheable_partition_type(dimension_def.partitions_def)
for dimension_def in partitions_def.partitions_defs
)
return (
partitions_def.name is not None
if isinstance(partitions_def, DynamicPartitionsDefinition)
else True
)
@whitelist_for_serdes
| AssetPartitionStatus |
python | dask__distributed | distributed/comm/tcp.py | {
"start": 26285,
"end": 26391
} | class ____(BaseTCPBackend):
_connector_class = TCPConnector
_listener_class = TCPListener
| TCPBackend |
python | openai__openai-python | tests/test_transform.py | {
"start": 4755,
"end": 4854
} | class ____(TypedDict, total=False):
foo: Annotated[date, PropertyInfo(format="iso8601")]
| DateDict |
python | apache__airflow | providers/imap/src/airflow/providers/imap/hooks/imap.py | {
"start": 1362,
"end": 12152
} | class ____(BaseHook):
"""
This hook connects to a mail server by using the imap protocol.
.. note:: Please call this Hook as context manager via `with`
to automatically open and close the connection to the mail server.
:param imap_conn_id: The :ref:`imap connection id <howto/connection:imap>`
that contains the information used to authenticate the client.
"""
conn_name_attr = "imap_conn_id"
default_conn_name = "imap_default"
conn_type = "imap"
hook_name = "IMAP"
def __init__(self, imap_conn_id: str = default_conn_name) -> None:
super().__init__()
self.imap_conn_id = imap_conn_id
self.mail_client: imaplib.IMAP4_SSL | imaplib.IMAP4 | None = None
def __enter__(self) -> ImapHook:
return self.get_conn()
def __exit__(self, exc_type, exc_val, exc_tb):
self.mail_client.logout()
def get_conn(self) -> ImapHook:
"""
Login to the mail server.
.. note:: Please call this Hook as context manager via `with`
to automatically open and close the connection to the mail server.
:return: an authorized ImapHook object.
"""
if not self.mail_client:
conn = self.get_connection(self.imap_conn_id)
self.mail_client = self._build_client(conn) # type: ignore[arg-type]
if conn.login and conn.password:
self.mail_client.login(conn.login, conn.password)
return self
def _build_client(self, conn: Connection) -> imaplib.IMAP4_SSL | imaplib.IMAP4:
mail_client: imaplib.IMAP4_SSL | imaplib.IMAP4
host = conn.host or ""
use_ssl = conn.extra_dejson.get("use_ssl", True)
if use_ssl:
from airflow.configuration import conf
extra_ssl_context = conn.extra_dejson.get("ssl_context", None)
if extra_ssl_context:
ssl_context_string = extra_ssl_context
else:
ssl_context_string = conf.get("imap", "SSL_CONTEXT", fallback=None)
if ssl_context_string is None:
ssl_context_string = conf.get("email", "SSL_CONTEXT", fallback=None)
if ssl_context_string is None:
ssl_context_string = "default"
if ssl_context_string == "default":
ssl_context = ssl.create_default_context()
elif ssl_context_string == "none":
ssl_context = None
else:
raise RuntimeError(
f"The email.ssl_context configuration variable must "
f"be set to 'default' or 'none' and is '{ssl_context_string}'."
)
if conn.port:
mail_client = imaplib.IMAP4_SSL(host, conn.port, ssl_context=ssl_context)
else:
mail_client = imaplib.IMAP4_SSL(host, ssl_context=ssl_context)
else:
if conn.port:
mail_client = imaplib.IMAP4(host, conn.port)
else:
mail_client = imaplib.IMAP4(host)
return mail_client
def has_mail_attachment(
self, name: str, *, check_regex: bool = False, mail_folder: str = "INBOX", mail_filter: str = "All"
) -> bool:
"""
Check the mail folder for mails containing attachments with the given name.
:param name: The name of the attachment that will be searched for.
:param check_regex: Checks the name for a regular expression.
:param mail_folder: The mail folder where to look at.
:param mail_filter: If set other than 'All' only specific mails will be checked.
See :py:meth:`imaplib.IMAP4.search` for details.
:returns: True if there is an attachment with the given name and False if not.
"""
mail_attachments = self._retrieve_mails_attachments_by_name(
name, check_regex, True, mail_folder, mail_filter
)
return bool(mail_attachments)
def retrieve_mail_attachments(
self,
name: str,
*,
check_regex: bool = False,
latest_only: bool = False,
mail_folder: str = "INBOX",
mail_filter: str = "All",
not_found_mode: str = "raise",
) -> list[tuple]:
"""
Retrieve mail's attachments in the mail folder by its name.
:param name: The name of the attachment that will be downloaded.
:param check_regex: Checks the name for a regular expression.
:param latest_only: If set to True it will only retrieve the first matched attachment.
:param mail_folder: The mail folder where to look at.
:param mail_filter: If set other than 'All' only specific mails will be checked.
See :py:meth:`imaplib.IMAP4.search` for details.
:param not_found_mode: Specify what should happen if no attachment has been found.
Supported values are 'raise', 'warn' and 'ignore'.
If it is set to 'raise' it will raise an exception,
if set to 'warn' it will only print a warning and
if set to 'ignore' it won't notify you at all.
:returns: a list of tuple each containing the attachment filename and its payload.
"""
mail_attachments = self._retrieve_mails_attachments_by_name(
name, check_regex, latest_only, mail_folder, mail_filter
)
if not mail_attachments:
self._handle_not_found_mode(not_found_mode)
return mail_attachments
def download_mail_attachments(
self,
name: str,
local_output_directory: str,
*,
check_regex: bool = False,
latest_only: bool = False,
mail_folder: str = "INBOX",
mail_filter: str = "All",
not_found_mode: str = "raise",
) -> None:
"""
Download mail's attachments in the mail folder by its name to the local directory.
:param name: The name of the attachment that will be downloaded.
:param local_output_directory: The output directory on the local machine
where the files will be downloaded to.
:param check_regex: Checks the name for a regular expression.
:param latest_only: If set to True it will only download the first matched attachment.
:param mail_folder: The mail folder where to look at.
:param mail_filter: If set other than 'All' only specific mails will be checked.
See :py:meth:`imaplib.IMAP4.search` for details.
:param not_found_mode: Specify what should happen if no attachment has been found.
Supported values are 'raise', 'warn' and 'ignore'.
If it is set to 'raise' it will raise an exception,
if set to 'warn' it will only print a warning and
if set to 'ignore' it won't notify you at all.
"""
mail_attachments = self._retrieve_mails_attachments_by_name(
name, check_regex, latest_only, mail_folder, mail_filter
)
if not mail_attachments:
self._handle_not_found_mode(not_found_mode)
self._create_files(mail_attachments, local_output_directory)
def _handle_not_found_mode(self, not_found_mode: str) -> None:
if not_found_mode not in ("raise", "warn", "ignore"):
self.log.error('Invalid "not_found_mode" %s', not_found_mode)
elif not_found_mode == "raise":
raise AirflowException("No mail attachments found!")
elif not_found_mode == "warn":
self.log.warning("No mail attachments found!")
def _retrieve_mails_attachments_by_name(
self, name: str, check_regex: bool, latest_only: bool, mail_folder: str, mail_filter: str
) -> list:
if not self.mail_client:
raise RuntimeError("The 'mail_client' should be initialized before!")
all_matching_attachments = []
self.mail_client.select(mail_folder)
for mail_id in self._list_mail_ids_desc(mail_filter):
response_mail_body = self._fetch_mail_body(mail_id)
matching_attachments = self._check_mail_body(response_mail_body, name, check_regex, latest_only)
if matching_attachments:
all_matching_attachments.extend(matching_attachments)
if latest_only:
break
self.mail_client.close()
return all_matching_attachments
def _list_mail_ids_desc(self, mail_filter: str) -> Iterable[str]:
if not self.mail_client:
raise RuntimeError("The 'mail_client' should be initialized before!")
_, data = self.mail_client.search(None, mail_filter)
mail_ids = data[0].split()
return reversed(mail_ids)
def _fetch_mail_body(self, mail_id: str) -> str:
if not self.mail_client:
raise RuntimeError("The 'mail_client' should be initialized before!")
_, data = self.mail_client.fetch(mail_id, "(RFC822)")
mail_body = data[0][1] # type: ignore # The mail body is always in this specific location
mail_body_str = mail_body.decode("utf-8") # type: ignore
return mail_body_str
def _check_mail_body(
self, response_mail_body: str, name: str, check_regex: bool, latest_only: bool
) -> list[tuple[Any, Any]]:
mail = Mail(response_mail_body)
if mail.has_attachments():
return mail.get_attachments_by_name(name, check_regex, find_first=latest_only)
return []
def _create_files(self, mail_attachments: list, local_output_directory: str) -> None:
for name, payload in mail_attachments:
if self._is_symlink(name):
self.log.error("Can not create file because it is a symlink!")
elif self._is_escaping_current_directory(name):
self.log.error("Can not create file because it is escaping the current directory!")
else:
self._create_file(name, payload, local_output_directory)
def _is_symlink(self, name: str) -> bool:
# IMPORTANT NOTE: os.path.islink is not working for windows symlinks
# See: https://stackoverflow.com/a/11068434
return os.path.islink(name)
def _is_escaping_current_directory(self, name: str) -> bool:
return "../" in name
def _correct_path(self, name: str, local_output_directory: str) -> str:
return (
local_output_directory + name
if local_output_directory.endswith("/")
else local_output_directory + "/" + name
)
def _create_file(self, name: str, payload: Any, local_output_directory: str) -> None:
file_path = self._correct_path(name, local_output_directory)
with open(file_path, "wb") as file:
file.write(payload)
| ImapHook |
python | modin-project__modin | modin/core/io/column_stores/parquet_dispatcher.py | {
"start": 1579,
"end": 6124
} | class ____:
"""
Base class that encapsulates Parquet engine-specific details.
This class exposes a set of functions that are commonly used in the
`read_parquet` implementation.
Attributes
----------
path : str, path object or file-like object
The filepath of the parquet file in local filesystem or hdfs.
storage_options : dict
Parameters for specific storage engine.
_fs_path : str, path object or file-like object
The filepath or handle of the parquet dataset specific to the
filesystem implementation. E.g. for `s3://test/example`, _fs
would be set to S3FileSystem and _fs_path would be `test/example`.
_fs : Filesystem
Filesystem object specific to the given parquet file/dataset.
dataset : ParquetDataset or ParquetFile
Underlying dataset implementation for PyArrow and fastparquet
respectively.
"""
def __init__(self, path, storage_options): # noqa : PR01
self.path = path.__fspath__() if isinstance(path, os.PathLike) else path
self.storage_options = storage_options
self._fs_path = None
self._fs = None
self.dataset = self._init_dataset()
@property
def pandas_metadata(self):
"""Return the pandas metadata of the dataset."""
raise NotImplementedError
@property
def columns(self):
"""Return the list of columns in the dataset."""
raise NotImplementedError
@property
def engine(self):
"""Return string representing what engine is being used."""
raise NotImplementedError
@functools.cached_property
def files(self):
"""Return the list of formatted file paths of the dataset."""
raise NotImplementedError
@functools.cached_property
def row_groups_per_file(self):
"""Return a list with the number of row groups per file."""
raise NotImplementedError
@property
def fs(self):
"""
Return the filesystem object associated with the dataset path.
Returns
-------
filesystem
Filesystem object.
"""
if self._fs is None:
if isinstance(self.path, AbstractBufferedFile):
self._fs = self.path.fs
else:
self._fs, self._fs_path = url_to_fs(self.path, **self.storage_options)
return self._fs
@property
def fs_path(self):
"""
Return the filesystem-specific path or file handle.
Returns
-------
fs_path : str, path object or file-like object
String path specific to filesystem or a file handle.
"""
if self._fs_path is None:
if isinstance(self.path, AbstractBufferedFile):
self._fs_path = self.path
else:
self._fs, self._fs_path = url_to_fs(self.path, **self.storage_options)
return self._fs_path
def to_pandas_dataframe(self, columns):
"""
Read the given columns as a pandas dataframe.
Parameters
----------
columns : list
List of columns that should be read from file.
"""
raise NotImplementedError
def _get_files(self, files):
"""
Retrieve list of formatted file names in dataset path.
Parameters
----------
files : list
List of files from path.
Returns
-------
fs_files : list
List of files from path with fs-protocol prepended.
"""
# Older versions of fsspec doesn't support unstrip_protocol(). It
# was only added relatively recently:
# https://github.com/fsspec/filesystem_spec/pull/828
def _unstrip_protocol(protocol, path):
protos = (protocol,) if isinstance(protocol, str) else protocol
for protocol in protos:
if path.startswith(f"{protocol}://"):
return path
return f"{protos[0]}://{path}"
if isinstance(self.path, AbstractBufferedFile):
return [self.path]
# version.parse() is expensive, so we can split this into two separate loops
if version.parse(fsspec.__version__) < version.parse("2022.5.0"):
fs_files = [_unstrip_protocol(self.fs.protocol, fpath) for fpath in files]
else:
fs_files = [self.fs.unstrip_protocol(fpath) for fpath in files]
return fs_files
@_inherit_docstrings(ColumnStoreDataset)
| ColumnStoreDataset |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 361816,
"end": 362929
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of UpdateProjectV2"""
__schema__ = github_schema
__field_names__ = ("project_id", "title", "short_description", "readme", "closed", "public", "client_mutation_id")
project_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="projectId")
"""The ID of the Project to update."""
title = sgqlc.types.Field(String, graphql_name="title")
"""Set the title of the project."""
short_description = sgqlc.types.Field(String, graphql_name="shortDescription")
"""Set the short description of the project."""
readme = sgqlc.types.Field(String, graphql_name="readme")
"""Set the readme description of the project."""
closed = sgqlc.types.Field(Boolean, graphql_name="closed")
"""Set the project to closed or open."""
public = sgqlc.types.Field(Boolean, graphql_name="public")
"""Set the project to public or private."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UpdateProjectV2Input |
python | apache__airflow | providers/cloudant/src/airflow/providers/cloudant/hooks/cloudant.py | {
"start": 1121,
"end": 3492
} | class ____(BaseHook):
"""
Interact with Cloudant. This class is a thin wrapper around the cloudant python library.
.. seealso:: the latest documentation `here <https://python-cloudant.readthedocs.io/en/latest/>`_.
:param cloudant_conn_id: The connection id to authenticate and get a session object from cloudant.
"""
conn_name_attr = "cloudant_conn_id"
default_conn_name = "cloudant_default"
conn_type = "cloudant"
hook_name = "Cloudant"
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Return custom field behaviour."""
return {
"hidden_fields": ["schema", "port", "extra"],
"relabeling": {"host": "Account", "login": "Username (or API Key)"},
}
def __init__(self, cloudant_conn_id: str = default_conn_name) -> None:
super().__init__()
self.cloudant_conn_id = cloudant_conn_id
def get_conn(self) -> CloudantV1:
"""
Create an authenticated service object for connection to the Cloudant service.
.. note::
In the connection form:
- 'host' equals the 'Account' (required)
- 'login' equals the 'Username (or API Key)' (required)
- 'password' equals the 'Password' (required)
:return: a CloudantV1 service object backed by a session-based user/password authenticator.
"""
conn = self.get_connection(self.cloudant_conn_id)
self._validate_connection(conn) # type: ignore[arg-type]
if conn.login and conn.password:
authenticator = CouchDbSessionAuthenticator(username=conn.login, password=conn.password)
service = CloudantV1(authenticator=authenticator)
service.set_service_url(f"https://{conn.host}.cloudant.com")
return service
raise AirflowException("Missing login or password in Cloudant connection.")
@staticmethod
def _validate_connection(conn: Connection) -> None:
missing_params = []
for conn_param in ["host", "login", "password"]:
if not getattr(conn, conn_param):
missing_params.append(conn_param)
if missing_params:
raise AirflowException(
f"Missing connection parameter{'s' if len(missing_params) > 1 else ''}: {', '.join(missing_params)}"
)
| CloudantHook |
python | google__pytype | pytype/load_pytd_test.py | {
"start": 1554,
"end": 30104
} | class ____(_LoaderTest):
"""Tests for load_pytd.py."""
def test_filepath_to_module(self):
# (filename, pythonpath, expected)
test_cases = [
("foo/bar/baz.py", [""], "foo.bar.baz"),
("foo/bar/baz.py", ["foo"], "bar.baz"),
("foo/bar/baz.py", ["fo"], "foo.bar.baz"),
("foo/bar/baz.py", ["foo/"], "bar.baz"),
("foo/bar/baz.py", ["foo", "bar"], "bar.baz"),
("foo/bar/baz.py", ["foo/bar", "foo"], "baz"),
("foo/bar/baz.py", ["foo", "foo/bar"], "bar.baz"),
("./foo/bar.py", [""], "foo.bar"),
("./foo.py", [""], "foo"),
("../foo.py", [""], None),
("../foo.py", ["."], None),
("foo/bar/../baz.py", [""], "foo.baz"),
("../foo.py", [".."], "foo"),
("../../foo.py", ["../.."], "foo"),
("../../foo.py", [".."], None),
]
replaced_test_cased = []
for a, b, c in test_cases:
replaced_test_cased.append((
file_utils.replace_separator(a),
list(map(file_utils.replace_separator, b)),
c,
))
test_cases = replaced_test_cased
for filename, pythonpath, expected in test_cases:
module = module_utils.get_module_name(filename, pythonpath)
self.assertEqual(module, expected)
def test_builtin_sys(self):
with self._setup_loader() as loader:
ast = loader.import_name("sys")
self.assertTrue(ast.Lookup("sys.exit"))
def test_basic(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("path/to/some/module.pyi"),
"def foo(x:int) -> str: ...",
)
loader = load_pytd.Loader(
config.Options.create(
module_name="base",
python_version=self.python_version,
pythonpath=d.path,
)
)
ast = loader.import_name("path.to.some.module")
self.assertTrue(ast.Lookup("path.to.some.module.foo"))
def test_path(self):
with test_utils.Tempdir() as d1:
with test_utils.Tempdir() as d2:
d1.create_file(
file_utils.replace_separator("dir1/module1.pyi"),
"def foo1() -> str: ...",
)
d2.create_file(
file_utils.replace_separator("dir2/module2.pyi"),
"def foo2() -> str: ...",
)
loader = load_pytd.Loader(
config.Options.create(
module_name="base",
python_version=self.python_version,
pythonpath=f"{d1.path}{os.pathsep}{d2.path}",
)
)
module1 = loader.import_name("dir1.module1")
module2 = loader.import_name("dir2.module2")
self.assertTrue(module1.Lookup("dir1.module1.foo1"))
self.assertTrue(module2.Lookup("dir2.module2.foo2"))
def test_init(self):
with test_utils.Tempdir() as d1:
d1.create_file(
file_utils.replace_separator("baz/__init__.pyi"),
"x = ... # type: int",
)
loader = load_pytd.Loader(
config.Options.create(
module_name="base",
python_version=self.python_version,
pythonpath=d1.path,
)
)
self.assertTrue(loader.import_name("baz").Lookup("baz.x"))
def test_builtins(self):
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", "x = ... # type: int")
loader = load_pytd.Loader(
config.Options.create(
module_name="base",
python_version=self.python_version,
pythonpath=d.path,
)
)
mod = loader.import_name("foo")
self.assertEqual("builtins.int", mod.Lookup("foo.x").type.cls.name)
self.assertEqual("builtins.int", mod.Lookup("foo.x").type.name)
def test_no_init(self):
with test_utils.Tempdir() as d:
d.create_directory("baz")
loader = load_pytd.Loader(
config.Options.create(
module_name="base",
python_version=self.python_version,
pythonpath=d.path,
)
)
self.assertTrue(loader.import_name("baz"))
def test_no_init_imports_map(self):
with test_utils.Tempdir() as d:
d.create_directory("baz")
with file_utils.cd(d.path):
loader = load_pytd.Loader(
config.Options.create(
module_name="base",
python_version=self.python_version,
pythonpath="",
)
)
loader.options.tweak(imports_map=imports_map.ImportsMap())
self.assertFalse(loader.import_name("baz"))
def test_stdlib(self):
with self._setup_loader() as loader:
ast = loader.import_name("io")
self.assertTrue(ast.Lookup("io.StringIO"))
def test_deep_dependency(self):
with test_utils.Tempdir() as d:
d.create_file("module1.pyi", "def get_bar() -> module2.Bar: ...")
d.create_file("module2.pyi", "class Bar:\n pass")
loader = load_pytd.Loader(
config.Options.create(
module_name="base",
python_version=self.python_version,
pythonpath=d.path,
)
)
module1 = loader.import_name("module1")
(f,) = module1.Lookup("module1.get_bar").signatures
self.assertEqual("module2.Bar", f.return_type.cls.name)
def test_circular_dependency(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def get_bar() -> bar.Bar: ...
class Foo:
pass
""",
)
d.create_file(
"bar.pyi",
"""
def get_foo() -> foo.Foo: ...
class Bar:
pass
""",
)
loader = load_pytd.Loader(
config.Options.create(
module_name="base",
python_version=self.python_version,
pythonpath=d.path,
)
)
foo = loader.import_name("foo")
bar = loader.import_name("bar")
(f1,) = foo.Lookup("foo.get_bar").signatures
(f2,) = bar.Lookup("bar.get_foo").signatures
self.assertEqual("bar.Bar", f1.return_type.cls.name)
self.assertEqual("foo.Foo", f2.return_type.cls.name)
def test_circular_dependency_complicated(self):
# The dependency graph looks like:
# target ----------
# | |
# v v
# dep1 -> dep2 -> dep3
# ^ |
# | |
# -----------------
with self._setup_loader(
target="""
from dep1 import PathLike
from dep3 import AnyPath
def abspath(path: PathLike[str]) -> str: ...
""",
dep1="""
from dep2 import Popen
from typing import Generic, TypeVar
_T = TypeVar('_T')
class PathLike(Generic[_T]): ...
""",
dep2="""
from dep3 import AnyPath
class Popen: ...
""",
dep3="""
from dep1 import PathLike
AnyPath = PathLike[str]
""",
) as loader:
loader.finish_and_verify_ast(
loader.load_file(
"target",
path_utils.join(loader.options.pythonpath[0], "target.pyi"),
)
)
def test_circular_dependency_with_type_param(self):
with test_utils.Tempdir() as d:
d.create_file(
"bar.pyi",
"""
from typing import Callable, ParamSpec
from foo import Foo
_P = ParamSpec("_P")
class Bar:
foo: Foo | None
def bar(obj: Callable[_P, None], /, *args: _P.args, **kwargs: _P.kwargs) -> Bar: ...
""",
)
d.create_file(
"foo.pyi",
"""
from bar import bar as _bar
class Foo: ...
bar = _bar
""",
)
loader = load_pytd.Loader(
config.Options.create(
module_name="base",
python_version=self.python_version,
pythonpath=d.path,
)
)
bar = loader.import_name("bar")
foo = loader.import_name("foo")
self.assertTrue(bar.Lookup("bar.bar"))
self.assertTrue(foo.Lookup("foo.bar"))
def test_cache(self):
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", "def get_bar() -> bar.Bar: ...")
d.create_file("bar.pyi", "class Bar:\n pass")
loader = load_pytd.Loader(
config.Options.create(
module_name="base",
python_version=self.python_version,
pythonpath=d.path,
)
)
loader.import_name("bar")
d.delete_file("bar.pyi")
foo = loader.import_name("foo")
(f,) = foo.Lookup("foo.get_bar").signatures
self.assertEqual("bar.Bar", f.return_type.cls.name)
def test_remove(self):
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", "def get_bar() -> bar.Bar: ...")
d.create_file("bar.pyi", "class Bar:\n pass")
loader = load_pytd.Loader(
config.Options.create(
module_name="base",
python_version=self.python_version,
pythonpath=d.path,
)
)
bar = loader.import_name("bar")
self.assertTrue(bar.Lookup("bar.Bar"))
d.delete_file("bar.pyi")
loader.remove_name("bar")
with self.assertRaisesRegex(load_pytd.BadDependencyError, "bar"):
loader.import_name("foo")
def test_relative(self):
with test_utils.Tempdir() as d:
d.create_file("__init__.pyi", "base = ... # type: str")
d.create_file(
file_utils.replace_separator("path/__init__.pyi"),
"path = ... # type: str",
)
d.create_file(
file_utils.replace_separator("path/to/__init__.pyi"),
"to = ... # type: str",
)
d.create_file(
file_utils.replace_separator("path/to/some/__init__.pyi"),
"some = ... # type: str",
)
d.create_file(file_utils.replace_separator("path/to/some/module.pyi"), "")
loader = load_pytd.Loader(
config.Options.create(
module_name="path.to.some.module",
python_version=self.python_version,
pythonpath=d.path,
)
)
some = loader.import_relative(1)
to = loader.import_relative(2)
path = loader.import_relative(3)
# Python doesn't allow "...." here, so don't test import_relative(4).
self.assertTrue(some.Lookup("path.to.some.some"))
self.assertTrue(to.Lookup("path.to.to"))
self.assertTrue(path.Lookup("path.path"))
def test_typeshed(self):
with self._setup_loader() as loader:
self.assertTrue(loader.import_name("urllib.request"))
def test_prefer_typeshed(self):
with test_utils.Tempdir() as d:
# Override two modules from typeshed
d.create_file(
file_utils.replace_separator("typing_extensions/__init__.pyi"),
"foo: str = ...",
)
d.create_file(
file_utils.replace_separator("crypt/__init__.pyi"), "foo: str = ..."
)
loader = load_pytd.Loader(
config.Options.create(
module_name="x",
python_version=self.python_version,
pythonpath=d.path,
)
)
# typing_extensions should ignore the override, crypt should not.
ast1 = loader.import_name("typing_extensions")
ast2 = loader.import_name("crypt")
self.assertTrue(ast1.Lookup("typing_extensions.Literal"))
self.assertTrue(ast2.Lookup("crypt.foo"))
with self.assertRaises(KeyError):
ast1.Lookup("typing_extensions.foo")
with self.assertRaises(KeyError):
ast2.Lookup("crypt.crypt")
def test_resolve_alias(self):
with test_utils.Tempdir() as d:
d.create_file(
"module1.pyi",
"""
from typing import List
x = List[int]
""",
)
d.create_file(
"module2.pyi",
"""
def f() -> module1.x: ...
""",
)
loader = load_pytd.Loader(
config.Options.create(
module_name="base",
python_version=self.python_version,
pythonpath=d.path,
)
)
module2 = loader.import_name("module2")
(f,) = module2.Lookup("module2.f").signatures
self.assertEqual("list[int]", pytd_utils.Print(f.return_type))
def test_import_map_congruence(self):
with test_utils.Tempdir() as d:
foo_path = d.create_file("foo.pyi", "class X: ...")
bar_path = d.create_file("bar.pyi", "X = ... # type: another.foo.X")
# Map the same pyi file under two module paths.
null_device = "/dev/null" if sys.platform != "win32" else "NUL"
loader = load_pytd.Loader(
config.Options.create(
module_name="base",
python_version=self.python_version,
pythonpath="",
)
)
loader.options.tweak(
imports_map=imports_map.ImportsMap(
items={
"foo": foo_path,
file_utils.replace_separator("another/foo"): foo_path,
"bar": bar_path,
"empty1": null_device,
"empty2": null_device,
}
)
)
normal = loader.import_name("foo")
self.assertEqual("foo", normal.name)
loader.import_name("bar") # check that we can resolve against another.foo
another = loader.import_name("another.foo")
# We do *not* treat foo.X and another.foo.X the same, because Python
# doesn't, either:
self.assertIsNot(normal, another)
self.assertTrue([c.name.startswith("foo") for c in normal.classes])
self.assertTrue(
[c.name.startswith("another.foo") for c in another.classes]
)
# Make sure that multiple modules using /dev/null are not treated as
# congruent.
empty1 = loader.import_name("empty1")
empty2 = loader.import_name("empty2")
self.assertIsNot(empty1, empty2)
self.assertEqual("empty1", empty1.name)
self.assertEqual("empty2", empty2.name)
def test_unused_imports_map_paths(self):
with test_utils.Tempdir() as d:
foo_path = d.create_file("foo.pyi", "class Foo: ...")
bar_path = d.create_file("bar.pyi", "bar: foo.Foo = ...")
baz_path = d.create_file("baz.pyi", "class Baz: ...")
loader = load_pytd.Loader(
config.Options.create(
module_name="base",
python_version=self.python_version,
pythonpath="",
)
)
loader.options.tweak(
imports_map=imports_map.ImportsMap(
items={
"foo": foo_path,
"bar": bar_path,
"baz": baz_path,
file_utils.replace_separator("aliased/baz"): baz_path,
}
)
)
self.assertEqual(
{foo_path, bar_path, baz_path},
loader.get_unused_imports_map_paths(),
)
_ = loader.import_name("bar")
# Importing bar will access its upstream dependency foo.
self.assertEqual(
{baz_path},
loader.get_unused_imports_map_paths(),
)
_ = loader.import_name("foo")
self.assertEqual(
{baz_path},
loader.get_unused_imports_map_paths(),
)
_ = loader.import_name("aliased.baz")
self.assertFalse(loader.get_unused_imports_map_paths())
def test_package_relative_import(self):
with test_utils.Tempdir() as d:
d.create_file(file_utils.replace_separator("pkg/foo.pyi"), "class X: ...")
d.create_file(
file_utils.replace_separator("pkg/bar.pyi"),
"""
from .foo import X
y = ... # type: X""",
)
loader = load_pytd.Loader(
config.Options.create(
module_name="pkg.bar",
python_version=self.python_version,
pythonpath=d.path,
)
)
bar = loader.import_name("pkg.bar")
f = bar.Lookup("pkg.bar.y")
self.assertEqual("pkg.foo.X", f.type.name)
def test_directory_import(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("pkg/sub/__init__.pyi"),
"""
from .foo import *
from .bar import *""",
)
d.create_file(
file_utils.replace_separator("pkg/sub/foo.pyi"),
"""
class X: pass""",
)
d.create_file(
file_utils.replace_separator("pkg/sub/bar.pyi"),
"""
from .foo import X
y = ... # type: X""",
)
loader = load_pytd.Loader(
config.Options.create(
module_name="pkg",
python_version=self.python_version,
pythonpath=d.path,
)
)
ast = loader.import_name("pkg.sub")
self.assertTrue(ast.Lookup("pkg.sub.X"))
def test_diamond_import(self):
"""Should not fail on importing a module via two paths."""
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("pkg/sub/__init__.pyi"),
"""
from .foo import *
from .bar import *""",
)
d.create_file(
file_utils.replace_separator("pkg/sub/foo.pyi"),
"""
from .baz import X""",
)
d.create_file(
file_utils.replace_separator("pkg/sub/bar.pyi"),
"""
from .baz import X""",
)
d.create_file(
file_utils.replace_separator("pkg/sub/baz.pyi"),
"""
class X: ...""",
)
loader = load_pytd.Loader(
config.Options.create(
module_name="pkg",
python_version=self.python_version,
pythonpath=d.path,
)
)
ast = loader.import_name("pkg.sub")
self.assertTrue(ast.Lookup("pkg.sub.X"))
def test_get_resolved_modules(self):
with test_utils.Tempdir() as d:
filename = d.create_file(
file_utils.replace_separator("dir/module.pyi"),
"def foo() -> str: ...",
)
loader = load_pytd.Loader(
config.Options.create(
python_version=self.python_version, pythonpath=d.path
)
)
ast = loader.import_name("dir.module")
modules = loader.get_resolved_modules()
self.assertEqual(set(modules), {"builtins", "typing", "dir.module"})
module = modules["dir.module"]
self.assertEqual(module.module_name, "dir.module")
self.assertEqual(module.filename, filename)
self.assertEqual(module.ast, ast)
def test_circular_import(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("os2/__init__.pyi"),
"""
from . import path as path
_PathType = path._PathType
def utime(path: _PathType) -> None: ...
class stat_result: ...
""",
)
d.create_file(
file_utils.replace_separator("os2/path.pyi"),
"""
import os2
_PathType = bytes
def samestat(stat1: os2.stat_result) -> bool: ...
""",
)
loader = load_pytd.Loader(
config.Options.create(
python_version=self.python_version, pythonpath=d.path
)
)
ast = loader.import_name("os2.path")
self.assertEqual(
ast.Lookup("os2.path._PathType").type.name, "builtins.bytes"
)
def test_circular_import_with_external_type(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("os2/__init__.pyi"),
"""
from posix2 import stat_result as stat_result
from . import path as path
_PathType = path._PathType
def utime(path: _PathType) -> None: ...
""",
)
d.create_file(
file_utils.replace_separator("os2/path.pyi"),
"""
import os2
_PathType = bytes
def samestate(stat1: os2.stat_result) -> bool: ...
""",
)
d.create_file("posix2.pyi", "class stat_result: ...")
loader = load_pytd.Loader(
config.Options.create(
python_version=self.python_version, pythonpath=d.path
)
)
# Make sure all three modules were resolved properly.
loader.import_name("os2")
loader.import_name("os2.path")
loader.import_name("posix2")
def test_union_alias(self):
with test_utils.Tempdir() as d:
d.create_file(
"test.pyi",
"""
from typing import Union as _UnionT
x: _UnionT[int, str]
""",
)
loader = load_pytd.Loader(
config.Options.create(
python_version=self.python_version, pythonpath=d.path
)
)
ast = loader.import_name("test")
x = ast.Lookup("test.x")
self.assertIsInstance(x.type, pytd.UnionType)
def test_optional_alias(self):
with test_utils.Tempdir() as d:
d.create_file(
"test.pyi",
"""
from typing import Optional as _OptionalT
x: _OptionalT[int]
""",
)
loader = load_pytd.Loader(
config.Options.create(
python_version=self.python_version, pythonpath=d.path
)
)
ast = loader.import_name("test")
x = ast.Lookup("test.x")
self.assertIsInstance(x.type, pytd.UnionType)
def test_intersection_alias(self):
with test_utils.Tempdir() as d:
d.create_file(
"test.pyi",
"""
from typing import Intersection as _IntersectionT
x: _IntersectionT[int, str]
""",
)
loader = load_pytd.Loader(
config.Options.create(
python_version=self.python_version, pythonpath=d.path
)
)
ast = loader.import_name("test")
x = ast.Lookup("test.x")
self.assertIsInstance(x.type, pytd.IntersectionType)
def test_open_function(self):
def mock_open(*unused_args, **unused_kwargs):
return io.StringIO("x: int")
loader = load_pytd.Loader(
config.Options.create(
module_name="base",
python_version=self.python_version,
open_function=mock_open,
)
)
a = loader.load_file("a", "a.pyi")
self.assertEqual("int", pytd_utils.Print(a.Lookup("a.x").type))
def test_submodule_reexport(self):
with test_utils.Tempdir() as d:
d.create_file(file_utils.replace_separator("foo/bar.pyi"), "")
d.create_file(
file_utils.replace_separator("foo/__init__.pyi"),
"""
from . import bar as bar
""",
)
loader = load_pytd.Loader(
config.Options.create(
python_version=self.python_version, pythonpath=d.path
)
)
foo = loader.import_name("foo")
self.assertEqual(pytd_utils.Print(foo), "import foo.bar")
def test_submodule_rename(self):
with test_utils.Tempdir() as d:
d.create_file(file_utils.replace_separator("foo/bar.pyi"), "")
d.create_file(
file_utils.replace_separator("foo/__init__.pyi"),
"""
from . import bar as baz
""",
)
loader = load_pytd.Loader(
config.Options.create(
python_version=self.python_version, pythonpath=d.path
)
)
foo = loader.import_name("foo")
self.assertEqual(pytd_utils.Print(foo), "from foo import bar as foo.baz")
def test_typing_reexport(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("foo.pyi"),
"""
from typing import List as List
""",
)
d.create_file(
file_utils.replace_separator("bar.pyi"),
"""
from foo import *
def f() -> List[int]: ...
""",
)
loader = load_pytd.Loader(
config.Options.create(
python_version=self.python_version, pythonpath=d.path
)
)
foo = loader.import_name("foo")
bar = loader.import_name("bar")
self.assertEqual(
pytd_utils.Print(foo), "from builtins import list as List"
)
self.assertEqual(
pytd_utils.Print(bar),
textwrap.dedent("""
from builtins import list as List
def bar.f() -> list[int]: ...
""").strip(),
)
def test_reuse_builtin_name(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
class Ellipsis: ...
""",
)
d.create_file(
"bar.pyi",
"""
from foo import *
def f(x: Ellipsis): ...
""",
)
loader = load_pytd.Loader(
config.Options.create(
python_version=self.python_version, pythonpath=d.path
)
)
loader.import_name("foo")
bar = loader.import_name("bar")
self.assertEqual(
pytd_utils.Print(bar.Lookup("bar.f")),
"def bar.f(x: foo.Ellipsis) -> Any: ...",
)
def test_import_typevar(self):
# Regression test for the loader crashing with a
# "Duplicate top level items: 'T', 'T'" error.
self._import(
a="""
from typing import TypeVar
T = TypeVar('T')
""",
b="""
from a import T
def f(x: T) -> T: ...
""",
c="""
from b import *
""",
)
def test_import_class_from_parent_module(self):
with test_utils.Tempdir() as d:
d.create_file(
file_utils.replace_separator("foo/__init__.pyi"), "class Foo: ..."
)
d.create_file(
file_utils.replace_separator("foo/bar.pyi"),
"""
from . import Foo
class Bar(Foo): ...
""",
)
loader = load_pytd.Loader(
config.Options.create(
python_version=self.python_version, pythonpath=d.path
)
)
loader.import_name("foo.bar")
def test_module_alias(self):
ast = self._import(foo="""
import subprocess as _subprocess
x: _subprocess.Popen
""")
expected = textwrap.dedent("""
import subprocess as foo._subprocess
foo.x: foo._subprocess.Popen
""").strip()
self.assertMultiLineEqual(pytd_utils.Print(ast), expected)
def test_star_import_in_circular_dep(self):
stub3_ast = self._import(
stub1="""
from stub2 import Foo
from typing import Mapping as Mapping
""",
stub2="""
from stub3 import Mapping
class Foo: ...
""",
stub3="""
from stub1 import *
""",
)
self.assertEqual(
stub3_ast.Lookup("stub3.Foo").type, pytd.ClassType("stub2.Foo")
)
self.assertEqual(
stub3_ast.Lookup("stub3.Mapping").type, pytd.ClassType("typing.Mapping")
)
def test_import_all(self):
ast = self._import(
foo="__all__ = ['foo']",
bar="__all__ = ['bar']",
baz="""
from foo import *
from bar import *
""",
)
self.assertFalse(ast.aliases)
def test_import_private_typevar(self):
ast = self._import(
foo="""
from typing import TypeVar
_T = TypeVar('_T')
""",
bar="""
from typing import TypeVar
_T = TypeVar('_T')
""",
baz="""
from foo import *
from bar import *
""",
)
self.assertFalse(ast.type_params)
def test_use_class_alias(self):
ast = self._import(foo="""
class A:
class B: ...
x: A2.B
A2 = A
""")
a = ast.Lookup("foo.A")
self.assertEqual(a.Lookup("x").type.cls, a.Lookup("foo.A.B"))
def test_alias_typevar(self):
ast = self._import(foo="""
from typing import TypeVar as _TypeVar
T = _TypeVar('T')
""")
self.assertEqual(
ast.Lookup("foo.T"), pytd.TypeParameter(name="T", scope="foo")
)
def test_alias_property_with_setter(self):
ast = self._import(foo="""
class X:
@property
def f(self) -> int: ...
@f.setter
def f(self, value: int) -> None: ...
g = f
""")
x = ast.Lookup("foo.X")
self.assertEqual(
pytd_utils.Print(x.Lookup("f")), "f: Annotated[int, 'property']"
)
self.assertEqual(
pytd_utils.Print(x.Lookup("g")), "g: Annotated[int, 'property']"
)
def test_typing_alias(self):
# typing._Alias is a typeshed construct.
ast = self._import(foo="""
from typing import _Alias, TypeAlias
X = _Alias()
Y: TypeAlias = _Alias()
""")
self.assertEqual(
pytd_utils.Print(ast), "from typing import _Alias as X, _Alias as Y"
)
| ImportPathsTest |
python | pytorch__pytorch | tools/linter/adapters/test_device_bias_linter.py | {
"start": 636,
"end": 2401
} | class ____(NamedTuple):
path: str | None
line: int | None
char: int | None
code: str
severity: LintSeverity
name: str
original: str | None
replacement: str | None
description: str | None
DEVICE_BIAS = ["cuda", "xpu", "mps"]
GPU_RELATED_DECORATORS = {"requires_gpu", "requires_triton"}
def is_main_has_gpu(tree: ast.AST) -> bool:
def _contains_has_gpu(node: ast.AST) -> bool:
if isinstance(node, ast.Name) and node.id in ["HAS_GPU", "RUN_GPU"]:
return True
elif isinstance(node, ast.BoolOp):
return any(_contains_has_gpu(value) for value in node.values)
elif isinstance(node, ast.UnaryOp):
return _contains_has_gpu(node.operand)
elif isinstance(node, ast.Compare):
return _contains_has_gpu(node.left) or any(
_contains_has_gpu(comp) for comp in node.comparators
)
elif isinstance(node, (ast.IfExp, ast.Call)):
return False
return False
for node in ast.walk(tree):
# Detect if __name__ == "__main__":
if isinstance(node, ast.If):
if (
isinstance(node.test, ast.Compare)
and isinstance(node.test.left, ast.Name)
and node.test.left.id == "__name__"
):
if any(
isinstance(comp, ast.Constant) and comp.value == "__main__"
for comp in node.test.comparators
):
for inner_node in node.body:
if isinstance(inner_node, ast.If) and _contains_has_gpu(
inner_node.test
):
return True
return False
| LintMessage |
python | django__django | tests/postgres_tests/models.py | {
"start": 1734,
"end": 1842
} | class ____(PostgreSQLModel):
field = ArrayField(ArrayField(models.IntegerField()))
| NestedIntegerArrayModel |
python | pytorch__pytorch | torch/_dynamo/variables/misc.py | {
"start": 66710,
"end": 66797
} | class ____(VariableTracker):
"""Marker used to implement delattr()"""
| DeletedVariable |
python | python__mypy | mypy/test/testtypes.py | {
"start": 62183,
"end": 62821
} | class ____(TestCase):
# WARNING: do not increase this number unless absolutely necessary,
# and you understand what you are doing.
ALLOWED_GET_PROPER_TYPES = 7
@skipUnless(mypy.expandtype.__file__.endswith(".py"), "Skip for compiled mypy")
def test_count_get_proper_type(self) -> None:
with open(mypy.expandtype.__file__) as f:
code = f.read()
get_proper_type_count = len(re.findall(r"get_proper_type\(", code))
get_proper_type_count -= len(re.findall(r"get_proper_type\(\)", code))
assert get_proper_type_count == self.ALLOWED_GET_PROPER_TYPES
| TestExpandTypeLimitGetProperType |
python | wandb__wandb | wandb/vendor/pygments/lexers/templates.py | {
"start": 24536,
"end": 24994
} | class ____(DelegatingLexer):
"""
Subclass of the `MakoLexer` that highlights unlexed data
with the `CssLexer`.
.. versionadded:: 0.7
"""
name = 'CSS+Mako'
aliases = ['css+mako']
mimetypes = ['text/css+mako']
def __init__(self, **options):
super(MakoCssLexer, self).__init__(CssLexer, MakoLexer,
**options)
# Genshi and Cheetah lexers courtesy of Matt Good.
| MakoCssLexer |
python | TheAlgorithms__Python | data_structures/binary_tree/flatten_binarytree_to_linkedlist.py | {
"start": 456,
"end": 3408
} | class ____:
"""
A TreeNode has data variable and pointers to TreeNode objects
for its left and right children.
"""
def __init__(self, data: int) -> None:
self.data = data
self.left: TreeNode | None = None
self.right: TreeNode | None = None
def build_tree() -> TreeNode:
"""
Build and return a sample binary tree.
Returns:
TreeNode: The root of the binary tree.
Examples:
>>> root = build_tree()
>>> root.data
1
>>> root.left.data
2
>>> root.right.data
5
>>> root.left.left.data
3
>>> root.left.right.data
4
>>> root.right.right.data
6
"""
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(5)
root.left.left = TreeNode(3)
root.left.right = TreeNode(4)
root.right.right = TreeNode(6)
return root
def flatten(root: TreeNode | None) -> None:
"""
Flatten a binary tree into a linked list in-place, where the linked list is
represented using the right pointers of the tree nodes.
Args:
root (TreeNode): The root of the binary tree to be flattened.
Examples:
>>> root = TreeNode(1)
>>> root.left = TreeNode(2)
>>> root.right = TreeNode(5)
>>> root.left.left = TreeNode(3)
>>> root.left.right = TreeNode(4)
>>> root.right.right = TreeNode(6)
>>> flatten(root)
>>> root.data
1
>>> root.right.right is None
False
>>> root.right.right = TreeNode(3)
>>> root.right.right.right is None
True
"""
if not root:
return
# Flatten the left subtree
flatten(root.left)
# Save the right subtree
right_subtree = root.right
# Make the left subtree the new right subtree
root.right = root.left
root.left = None
# Find the end of the new right subtree
current = root
while current.right:
current = current.right
# Append the original right subtree to the end
current.right = right_subtree
# Flatten the updated right subtree
flatten(right_subtree)
def display_linked_list(root: TreeNode | None) -> None:
"""
Display the flattened linked list.
Args:
root (TreeNode | None): The root of the flattened linked list.
Examples:
>>> root = TreeNode(1)
>>> root.right = TreeNode(2)
>>> root.right.right = TreeNode(3)
>>> display_linked_list(root)
1 2 3
>>> root = None
>>> display_linked_list(root)
"""
current = root
while current:
if current.right is None:
print(current.data, end="")
break
print(current.data, end=" ")
current = current.right
if __name__ == "__main__":
print("Flattened Linked List:")
root = build_tree()
flatten(root)
display_linked_list(root)
| TreeNode |
python | lxml__lxml | src/lxml/html/diff.py | {
"start": 32304,
"end": 32989
} | class ____(SequenceMatcher):
"""
Acts like SequenceMatcher, but tries not to find very small equal
blocks amidst large spans of changes
"""
threshold = 2
@cython.cfunc
def get_matching_blocks(self) -> list:
size: cython.Py_ssize_t = min(len(self.b), len(self.b))
threshold: cython.Py_ssize_t = self.threshold
threshold = min(threshold, size // 4)
actual = SequenceMatcher.get_matching_blocks(self)
return [item for item in actual
if item[2] > threshold
or not item[2]]
if __name__ == '__main__':
from lxml.html import _diffcommand
_diffcommand.main()
| InsensitiveSequenceMatcher |
python | mlflow__mlflow | mlflow/server/graphql/graphql_schema_extensions.py | {
"start": 986,
"end": 1852
} | class ____(MlflowRun):
experiment = graphene.Field(MlflowExperiment)
model_versions = graphene.List(graphene.NonNull(MlflowModelVersion))
def resolve_experiment(self, info):
experiment_id = self.info.experiment_id
input_dict = {"experiment_id": experiment_id}
request_message = mlflow.protos.service_pb2.GetExperiment()
parse_dict(input_dict, request_message)
return mlflow.server.handlers.get_experiment_impl(request_message).experiment
def resolve_model_versions(self, info):
run_id = self.info.run_id
input_dict = {"filter": f"run_id='{run_id}'"}
request_message = mlflow.protos.model_registry_pb2.SearchModelVersions()
parse_dict(input_dict, request_message)
return mlflow.server.handlers.search_model_versions_impl(request_message).model_versions
| MlflowRunExtension |
python | streamlit__streamlit | lib/streamlit/elements/widgets/time_widgets.py | {
"start": 14357,
"end": 14842
} | class ____:
value: time | None
def deserialize(self, ui_value: str | None) -> time | None:
return (
datetime.strptime(ui_value, "%H:%M").time()
if ui_value is not None
else self.value
)
def serialize(self, v: datetime | time | None) -> str | None:
if v is None:
return None
if isinstance(v, datetime):
v = v.time()
return time.strftime(v, "%H:%M")
@dataclass
| TimeInputSerde |
python | django__django | tests/admin_inlines/models.py | {
"start": 9028,
"end": 9110
} | class ____(Course):
class Meta:
proxy = True
# Other models
| CourseProxy2 |
python | fluentpython__example-code | 21-class-metaprog/bulkfood/model_v8.py | {
"start": 1901,
"end": 2113
} | class ____(metaclass=EntityMeta):
"""Business entity with validated fields"""
@classmethod
def field_names(cls): # <5>
for name in cls._field_names:
yield name
# END MODEL_V8
| Entity |
python | streamlit__streamlit | lib/streamlit/testing/v1/element_tree.py | {
"start": 34408,
"end": 36016
} | class ____(Widget, Generic[SliderValueT]):
"""A representation of ``st.slider``."""
_value: SliderValueT | Sequence[SliderValueT] | None
proto: SliderProto = field(repr=False)
label: str
data_type: SliderProto.DataType.ValueType
min: SliderValueT
max: SliderValueT
step: SliderStep
help: str
form_id: str
def __init__(self, proto: SliderProto, root: ElementTree) -> None:
super().__init__(proto, root)
self.type = "slider"
def set_value(
self, v: SliderValueT | Sequence[SliderValueT]
) -> Slider[SliderValueT]:
"""Set the (single) value of the slider."""
self._value = v
return self
@property
def _widget_state(self) -> WidgetState:
data_type = self.proto.data_type
serde = SliderSerde([], data_type, True, None)
v = serde.serialize(self.value)
ws = WidgetState()
ws.id = self.id
ws.double_array_value.data[:] = v
return ws
@property
def value(self) -> SliderValueT | Sequence[SliderValueT]:
"""The currently selected value or range. (Any or Sequence of Any)""" # noqa: D400
if self._value is not None:
return self._value
state = self.root.session_state
assert state
# Awkward to do this with `cast`
return state[self.id] # type: ignore
def set_range(
self, lower: SliderValueT, upper: SliderValueT
) -> Slider[SliderValueT]:
"""Set the ranged value of the slider."""
return self.set_value([lower, upper])
@dataclass(repr=False)
| Slider |
python | ray-project__ray | rllib/examples/connectors/classes/euclidian_distance_based_curiosity.py | {
"start": 277,
"end": 4959
} | class ____(ConnectorV2):
"""Learner ConnectorV2 piece computing intrinsic rewards with euclidian distance.
Add this connector piece to your Learner pipeline, through your algo config:
```
config.training(
learner_connector=lambda obs_sp, act_sp: EuclidianDistanceBasedCuriosity()
)
```
Intrinsic rewards are computed on the Learner side based on comparing the euclidian
distance of observations vs already seen ones. A configurable number of observations
will be stored in a FIFO buffer and all incoming observations have their distance
measured against those.
The minimum distance measured is the intrinsic reward for the incoming obs
(multiplied by a fixed coeffieicnt and added to the "main" extrinsic reward):
r(i) = intrinsic_reward_coeff * min(ED(o, o(i)) for o in stored_obs))
where `ED` is the euclidian distance and `stored_obs` is the buffer.
The intrinsic reward is then added to the extrinsic reward and saved back into the
episode (under the main "rewards" key).
Note that the computation and saving back to the episode all happens before the
actual train batch is generated from the episode data. Thus, the Learner and the
RLModule used do not take notice of the extra reward added.
Only one observation per incoming episode will be stored as a new one in the buffer.
Thereby, we pick the observation with the largest `min(ED)` value over all already
stored observations to be stored per episode.
If you would like to use a simpler, count-based mechanism for intrinsic reward
computations, take a look at the `CountBasedCuriosity` connector piece
at `ray.rllib.examples.connectors.classes.count_based_curiosity`
"""
def __init__(
self,
input_observation_space: Optional[gym.Space] = None,
input_action_space: Optional[gym.Space] = None,
*,
intrinsic_reward_coeff: float = 1.0,
max_buffer_size: int = 100,
**kwargs,
):
"""Initializes a CountBasedCuriosity instance.
Args:
intrinsic_reward_coeff: The weight with which to multiply the intrinsic
reward before adding (and saving) it back to the main (extrinsic)
reward of the episode at each timestep.
"""
super().__init__(input_observation_space, input_action_space)
# Create an observation buffer
self.obs_buffer = deque(maxlen=max_buffer_size)
self.intrinsic_reward_coeff = intrinsic_reward_coeff
self._test = 0
def __call__(
self,
*,
rl_module: RLModule,
batch: Any,
episodes: List[EpisodeType],
explore: Optional[bool] = None,
shared_data: Optional[dict] = None,
**kwargs,
) -> Any:
if self._test > 10:
return batch
self._test += 1
# Loop through all episodes and change the reward to
# [reward + intrinsic reward]
for sa_episode in self.single_agent_episode_iterator(
episodes=episodes, agents_that_stepped_only=False
):
# Loop through all obs, except the last one.
observations = sa_episode.get_observations(slice(None, -1))
# Get all respective (extrinsic) rewards.
rewards = sa_episode.get_rewards()
max_dist_obs = None
max_dist = float("-inf")
for i, (obs, rew) in enumerate(zip(observations, rewards)):
# Compare obs to all stored observations and compute euclidian distance.
min_dist = 0.0
if self.obs_buffer:
min_dist = min(
np.sqrt(np.sum((obs - stored_obs) ** 2))
for stored_obs in self.obs_buffer
)
if min_dist > max_dist:
max_dist = min_dist
max_dist_obs = obs
# Compute our euclidian distance-based intrinsic reward and add it to
# the main (extrinsic) reward.
rew += self.intrinsic_reward_coeff * min_dist
# Store the new reward back to the episode (under the correct
# timestep/index).
sa_episode.set_rewards(new_data=rew, at_indices=i)
# Add the one observation of this episode with the largest (min) euclidian
# dist to all already stored obs to the buffer (maybe throwing out the
# oldest obs in there).
if max_dist_obs is not None:
self.obs_buffer.append(max_dist_obs)
return batch
| EuclidianDistanceBasedCuriosity |
python | PyCQA__flake8 | src/flake8/exceptions.py | {
"start": 249,
"end": 345
} | class ____(Flake8Exception):
"""Exception raised during execution of Flake8."""
| ExecutionError |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1004148,
"end": 1004676
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of TransferEnterpriseOrganization"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "organization")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
organization = sgqlc.types.Field("Organization", graphql_name="organization")
"""The organization for which a transfer was initiated."""
| TransferEnterpriseOrganizationPayload |
python | charliermarsh__ruff | scripts/update_schemastore.py | {
"start": 984,
"end": 5215
} | class ____(enum.Enum):
SSH = "ssh"
HTTPS = "https"
def schemastore_repos(self) -> SchemastoreRepos:
match self:
case GitProtocol.SSH:
return SchemastoreRepos(
fork="git@github.com:astral-sh/schemastore.git",
upstream="git@github.com:SchemaStore/schemastore.git",
)
case GitProtocol.HTTPS:
return SchemastoreRepos(
fork="https://github.com/astral-sh/schemastore.git",
upstream="https://github.com/SchemaStore/schemastore.git",
)
case _:
assert_never(self)
def update_schemastore(
schemastore_path: Path, schemastore_repos: SchemastoreRepos
) -> None:
if not (schemastore_path / ".git").is_dir():
check_call(
["git", "clone", schemastore_repos.fork, schemastore_path, "--depth=1"]
)
check_call(
[
"git",
"remote",
"add",
"upstream",
schemastore_repos.upstream,
],
cwd=schemastore_path,
)
# Create a new branch tagged with the current ruff commit up to date with the latest
# upstream schemastore
check_call(["git", "fetch", "upstream"], cwd=schemastore_path)
current_sha = check_output(["git", "rev-parse", "HEAD"], text=True).strip()
branch = f"update-ruff-{current_sha}"
check_call(
["git", "switch", "-c", branch],
cwd=schemastore_path,
)
check_call(
["git", "reset", "--hard", "upstream/master"],
cwd=schemastore_path,
)
# Run npm install
src = schemastore_path / "src"
check_call(["npm", "install"], cwd=schemastore_path)
# Update the schema and format appropriately
schema = json.loads(RUFF_SCHEMA.read_text())
schema["$id"] = "https://json.schemastore.org/ruff.json"
(src / RUFF_JSON).write_text(
json.dumps(dict(schema.items()), indent=2, ensure_ascii=False),
)
check_call(
[
"../node_modules/prettier/bin/prettier.cjs",
"--plugin",
"prettier-plugin-sort-json",
"--write",
RUFF_JSON,
],
cwd=src,
)
# Check if the schema has changed
# https://stackoverflow.com/a/9393642/3549270
if check_output(["git", "status", "-s"], cwd=schemastore_path).strip():
# Schema has changed, commit and push
commit_url = f"{RUFF_REPO}/commit/{current_sha}"
commit_body = (
f"This updates ruff's JSON schema to [{current_sha}]({commit_url})"
)
# https://stackoverflow.com/a/22909204/3549270
check_call(["git", "add", (src / RUFF_JSON).as_posix()], cwd=schemastore_path)
check_call(
[
"git",
"commit",
"-m",
"Update ruff's JSON schema",
"-m",
commit_body,
],
cwd=schemastore_path,
)
# This should show the link to create a PR
check_call(
["git", "push", "--set-upstream", "origin", branch, "--force"],
cwd=schemastore_path,
)
else:
print("No changes")
def determine_git_protocol(argv: list[str] | None = None) -> GitProtocol:
import argparse
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
"--proto",
choices=[proto.value for proto in GitProtocol],
default="https",
help="Protocol to use for git authentication",
)
args = parser.parse_args(argv)
return GitProtocol(args.proto)
def main() -> None:
schemastore_repos = determine_git_protocol().schemastore_repos()
schemastore_existing = RUFF_ROOT / "schemastore"
if schemastore_existing.is_dir():
update_schemastore(schemastore_existing, schemastore_repos)
else:
with TemporaryDirectory(prefix="ruff-schemastore-") as temp_dir:
update_schemastore(Path(temp_dir), schemastore_repos)
if __name__ == "__main__":
main()
| GitProtocol |
python | django__django | tests/admin_views/admin.py | {
"start": 13398,
"end": 13470
} | class ____(admin.StackedInline):
model = FancyDoodad
| FancyDoodadInline |
python | langchain-ai__langchain | libs/text-splitters/langchain_text_splitters/json.py | {
"start": 159,
"end": 5876
} | class ____:
"""Splits JSON data into smaller, structured chunks while preserving hierarchy.
This class provides methods to split JSON data into smaller dictionaries or
JSON-formatted strings based on configurable maximum and minimum chunk sizes.
It supports nested JSON structures, optionally converts lists into dictionaries
for better chunking, and allows the creation of document objects for further use.
"""
max_chunk_size: int = 2000
"""The maximum size for each chunk."""
min_chunk_size: int = 1800
"""The minimum size for each chunk, derived from `max_chunk_size` if not
explicitly provided."""
def __init__(
self, max_chunk_size: int = 2000, min_chunk_size: int | None = None
) -> None:
"""Initialize the chunk size configuration for text processing.
This constructor sets up the maximum and minimum chunk sizes, ensuring that
the `min_chunk_size` defaults to a value slightly smaller than the
`max_chunk_size` if not explicitly provided.
Args:
max_chunk_size: The maximum size for a chunk.
min_chunk_size: The minimum size for a chunk. If `None`,
defaults to the maximum chunk size minus 200, with a lower bound of 50.
"""
super().__init__()
self.max_chunk_size = max_chunk_size
self.min_chunk_size = (
min_chunk_size
if min_chunk_size is not None
else max(max_chunk_size - 200, 50)
)
@staticmethod
def _json_size(data: dict[str, Any]) -> int:
"""Calculate the size of the serialized JSON object."""
return len(json.dumps(data))
@staticmethod
def _set_nested_dict(
d: dict[str, Any],
path: list[str],
value: Any, # noqa: ANN401
) -> None:
"""Set a value in a nested dictionary based on the given path."""
for key in path[:-1]:
d = d.setdefault(key, {})
d[path[-1]] = value
def _list_to_dict_preprocessing(
self,
data: Any, # noqa: ANN401
) -> Any: # noqa: ANN401
if isinstance(data, dict):
# Process each key-value pair in the dictionary
return {k: self._list_to_dict_preprocessing(v) for k, v in data.items()}
if isinstance(data, list):
# Convert the list to a dictionary with index-based keys
return {
str(i): self._list_to_dict_preprocessing(item)
for i, item in enumerate(data)
}
# Base case: the item is neither a dict nor a list, so return it unchanged
return data
def _json_split(
self,
data: Any, # noqa: ANN401
current_path: list[str] | None = None,
chunks: list[dict[str, Any]] | None = None,
) -> list[dict[str, Any]]:
"""Split json into maximum size dictionaries while preserving structure."""
current_path = current_path or []
chunks = chunks if chunks is not None else [{}]
if isinstance(data, dict):
for key, value in data.items():
new_path = [*current_path, key]
chunk_size = self._json_size(chunks[-1])
size = self._json_size({key: value})
remaining = self.max_chunk_size - chunk_size
if size < remaining:
# Add item to current chunk
self._set_nested_dict(chunks[-1], new_path, value)
else:
if chunk_size >= self.min_chunk_size:
# Chunk is big enough, start a new chunk
chunks.append({})
# Iterate
self._json_split(value, new_path, chunks)
else:
# handle single item
self._set_nested_dict(chunks[-1], current_path, data)
return chunks
def split_json(
self,
json_data: dict[str, Any],
convert_lists: bool = False, # noqa: FBT001,FBT002
) -> list[dict[str, Any]]:
"""Splits JSON into a list of JSON chunks."""
if convert_lists:
chunks = self._json_split(self._list_to_dict_preprocessing(json_data))
else:
chunks = self._json_split(json_data)
# Remove the last chunk if it's empty
if not chunks[-1]:
chunks.pop()
return chunks
def split_text(
self,
json_data: dict[str, Any],
convert_lists: bool = False, # noqa: FBT001,FBT002
ensure_ascii: bool = True, # noqa: FBT001,FBT002
) -> list[str]:
"""Splits JSON into a list of JSON formatted strings."""
chunks = self.split_json(json_data=json_data, convert_lists=convert_lists)
# Convert to string
return [json.dumps(chunk, ensure_ascii=ensure_ascii) for chunk in chunks]
def create_documents(
self,
texts: list[dict[str, Any]],
convert_lists: bool = False, # noqa: FBT001,FBT002
ensure_ascii: bool = True, # noqa: FBT001,FBT002
metadatas: list[dict[Any, Any]] | None = None,
) -> list[Document]:
"""Create a list of `Document` objects from a list of json objects (`dict`)."""
metadatas_ = metadatas or [{}] * len(texts)
documents = []
for i, text in enumerate(texts):
for chunk in self.split_text(
json_data=text, convert_lists=convert_lists, ensure_ascii=ensure_ascii
):
metadata = copy.deepcopy(metadatas_[i])
new_doc = Document(page_content=chunk, metadata=metadata)
documents.append(new_doc)
return documents
| RecursiveJsonSplitter |
python | ray-project__ray | rllib/examples/centralized_critic.py | {
"start": 3082,
"end": 8236
} | class ____:
"""Add method to evaluate the central value function from the model."""
def __init__(self):
if self.config["framework"] != "torch":
self.compute_central_vf = make_tf_callable(self.get_session())(
self.model.central_value_function
)
else:
self.compute_central_vf = self.model.central_value_function
# Grabs the opponent obs/act and includes it in the experience train_batch,
# and computes GAE using the central vf predictions.
def centralized_critic_postprocessing(
policy, sample_batch, other_agent_batches=None, episode=None
):
pytorch = policy.config["framework"] == "torch"
if (pytorch and hasattr(policy, "compute_central_vf")) or (
not pytorch and policy.loss_initialized()
):
assert other_agent_batches is not None
[(_, _, opponent_batch)] = list(other_agent_batches.values())
# also record the opponent obs and actions in the trajectory
sample_batch[OPPONENT_OBS] = opponent_batch[SampleBatch.CUR_OBS]
sample_batch[OPPONENT_ACTION] = opponent_batch[SampleBatch.ACTIONS]
# overwrite default VF prediction with the central VF
if args.framework == "torch":
sample_batch[SampleBatch.VF_PREDS] = (
policy.compute_central_vf(
convert_to_torch_tensor(
sample_batch[SampleBatch.CUR_OBS], policy.device
),
convert_to_torch_tensor(sample_batch[OPPONENT_OBS], policy.device),
convert_to_torch_tensor(
sample_batch[OPPONENT_ACTION], policy.device
),
)
.cpu()
.detach()
.numpy()
)
else:
sample_batch[SampleBatch.VF_PREDS] = convert_to_numpy(
policy.compute_central_vf(
sample_batch[SampleBatch.CUR_OBS],
sample_batch[OPPONENT_OBS],
sample_batch[OPPONENT_ACTION],
)
)
else:
# Policy hasn't been initialized yet, use zeros.
sample_batch[OPPONENT_OBS] = np.zeros_like(sample_batch[SampleBatch.CUR_OBS])
sample_batch[OPPONENT_ACTION] = np.zeros_like(sample_batch[SampleBatch.ACTIONS])
sample_batch[SampleBatch.VF_PREDS] = np.zeros_like(
sample_batch[SampleBatch.REWARDS], dtype=np.float32
)
completed = sample_batch[SampleBatch.TERMINATEDS][-1]
if completed:
last_r = 0.0
else:
last_r = sample_batch[SampleBatch.VF_PREDS][-1]
train_batch = compute_advantages(
sample_batch,
last_r,
policy.config["gamma"],
policy.config["lambda"],
use_gae=policy.config["use_gae"],
)
return train_batch
# Copied from PPO but optimizing the central value function.
def loss_with_central_critic(policy, base_policy, model, dist_class, train_batch):
# Save original value function.
vf_saved = model.value_function
# Calculate loss with a custom value function.
model.value_function = lambda: policy.model.central_value_function(
train_batch[SampleBatch.CUR_OBS],
train_batch[OPPONENT_OBS],
train_batch[OPPONENT_ACTION],
)
policy._central_value_out = model.value_function()
loss = base_policy.loss(model, dist_class, train_batch)
# Restore original value function.
model.value_function = vf_saved
return loss
def central_vf_stats(policy, train_batch):
# Report the explained variance of the central value function.
return {
"vf_explained_var": explained_variance(
train_batch[Postprocessing.VALUE_TARGETS], policy._central_value_out
)
}
def get_ccppo_policy(base):
class CCPPOTFPolicy(CentralizedValueMixin, base):
def __init__(self, observation_space, action_space, config):
base.__init__(self, observation_space, action_space, config)
CentralizedValueMixin.__init__(self)
@override(base)
def loss(self, model, dist_class, train_batch):
# Use super() to get to the base PPO policy.
# This special loss function utilizes a shared
# value function defined on self, and the loss function
# defined on PPO policies.
return loss_with_central_critic(
self, super(), model, dist_class, train_batch
)
@override(base)
def postprocess_trajectory(
self, sample_batch, other_agent_batches=None, episode=None
):
return centralized_critic_postprocessing(
self, sample_batch, other_agent_batches, episode
)
@override(base)
def stats_fn(self, train_batch: SampleBatch):
stats = super().stats_fn(train_batch)
stats.update(central_vf_stats(self, train_batch))
return stats
return CCPPOTFPolicy
CCPPOStaticGraphTFPolicy = get_ccppo_policy(PPOTF1Policy)
CCPPOEagerTFPolicy = get_ccppo_policy(PPOTF2Policy)
| CentralizedValueMixin |
python | PyCQA__pylint | pylint/exceptions.py | {
"start": 1243,
"end": 1352
} | class ____(Exception):
"""Raised when a report is empty and so should not be displayed."""
| EmptyReportError |
python | google__pytype | pytype/imports/builtin_stubs.py | {
"start": 1287,
"end": 3291
} | class ____:
"""The builtins and typing modules, which need to be treated specially."""
def _parse_predefined(self, name, options):
_, src = GetPredefinedFile("builtins", name, ".pytd")
mod = parser.parse_string(src, name=name, options=options)
return mod
def load(self, options):
"""Read builtins.pytd and typing.pytd, and return the parsed modules."""
t = self._parse_predefined("typing", options)
b = self._parse_predefined("builtins", options)
b = b.Visit(
visitors.LookupExternalTypes({"typing": t}, self_name="builtins")
)
t = t.Visit(visitors.LookupBuiltins(b))
b = b.Visit(visitors.NamedTypeToClassType())
t = t.Visit(visitors.NamedTypeToClassType())
b = b.Visit(visitors.AdjustTypeParameters())
t = t.Visit(visitors.AdjustTypeParameters())
b = b.Visit(visitors.CanonicalOrderingVisitor())
t = t.Visit(visitors.CanonicalOrderingVisitor())
b.Visit(visitors.FillInLocalPointers({"": b, "typing": t, "builtins": b}))
t.Visit(visitors.FillInLocalPointers({"": t, "typing": t, "builtins": b}))
b.Visit(visitors.VerifyLookup())
t.Visit(visitors.VerifyLookup())
b.Visit(visitors.VerifyContainers())
t.Visit(visitors.VerifyContainers())
return b, t
def GetPredefinedFile(
stubs_subdir, module, extension=".pytd", as_package=False
):
"""Get the contents of a predefined PyTD, typically with a file name *.pytd.
Arguments:
stubs_subdir: the directory, typically "builtins" or "stdlib"
module: module name (e.g., "sys" or "__builtins__")
extension: either ".pytd" or ".py"
as_package: try the module as a directory with an __init__ file
Returns:
The contents of the file
Raises:
IOError: if file not found
"""
parts = module.split(".")
if as_package:
parts.append("__init__")
mod_path = path_utils.join(*parts) + extension
path = path_utils.join("stubs", stubs_subdir, mod_path)
return path, pytype_source_utils.load_text_file(path)
| BuiltinsAndTyping |
python | kamyu104__LeetCode-Solutions | Python/binary-tree-longest-consecutive-sequence.py | {
"start": 29,
"end": 813
} | class ____(object):
def longestConsecutive(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.max_len = 0
def longestConsecutiveHelper(root):
if not root:
return 0
left_len = longestConsecutiveHelper(root.left)
right_len = longestConsecutiveHelper(root.right)
cur_len = 1
if root.left and root.left.val == root.val + 1:
cur_len = max(cur_len, left_len + 1)
if root.right and root.right.val == root.val + 1:
cur_len = max(cur_len, right_len + 1)
self.max_len = max(self.max_len, cur_len)
return cur_len
longestConsecutiveHelper(root)
return self.max_len
| Solution |
python | Textualize__textual | src/textual/events.py | {
"start": 23921,
"end": 24058
} | class ____(Event, bubble=False):
"""Sent to screen that has been made active.
- [ ] Bubbles
- [ ] Verbose
"""
| ScreenResume |
python | walkccc__LeetCode | solutions/2747. Count Zero Request Servers/2747.py | {
"start": 183,
"end": 1067
} | class ____:
def countServers(
self,
n: int,
logs: list[list[int]],
x: int,
queries: list[int],
) -> list[int]:
ans = [0] * len(queries)
count = [0] * (n + 1)
logs.sort(key=lambda x: x[1])
i = 0
j = 0
servers = 0
# For each query, we care about logs[i..j].
for queryIndex, query in sorted([IndexedQuery(i, query)
for i, query in enumerate(queries)],
key=lambda x: x.query):
while j < len(logs) and logs[j][1] <= query:
count[logs[j][0]] += 1
if count[logs[j][0]] == 1:
servers += 1
j += 1
while i < len(logs) and logs[i][1] < query - x:
count[logs[i][0]] -= 1
if count[logs[i][0]] == 0:
servers -= 1
i += 1
ans[queryIndex] = n - servers
return ans
| Solution |
python | django__django | tests/model_inheritance_regress/models.py | {
"start": 2076,
"end": 2158
} | class ____(Article):
author = models.CharField(max_length=100)
| ArticleWithAuthor |
python | huggingface__transformers | src/transformers/models/time_series_transformer/modeling_time_series_transformer.py | {
"start": 18626,
"end": 21823
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: TimeSeriesTransformerConfig, layer_idx: Optional[int] = None):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = TimeSeriesTransformerAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
config=config,
layer_idx=layer_idx,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.FloatTensor,
attention_mask: torch.FloatTensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->TimeSeriesTransformer, with BART->TIME_SERIES_TRANSFORMER
| TimeSeriesTransformerEncoderLayer |
python | spack__spack | lib/spack/spack/url_buildcache.py | {
"start": 1422,
"end": 2309
} | class ____(enum.Enum):
"""Enumeration of the kinds of things that live in a URL buildcache
These enums serve two purposes: They allow different buildcache layout
versions to specify different relative location of these entities, and
they're used to map buildcache objects to their respective media types.
"""
# manifest files
MANIFEST = enum.auto()
# metadata file for a binary package
SPEC = enum.auto()
# things that live in the blobs directory
BLOB = enum.auto()
# binary mirror index
INDEX = enum.auto()
# public key used for verifying signed binary packages
KEY = enum.auto()
# index of all public keys found in the mirror
KEY_INDEX = enum.auto()
# compressed archive of spec installation directory
TARBALL = enum.auto()
# binary mirror descriptor file
LAYOUT_JSON = enum.auto()
| BuildcacheComponent |
python | pypa__warehouse | warehouse/accounts/security_policy.py | {
"start": 4251,
"end": 8817
} | class ____:
"""The BasicAuthSecurityPolicy is no longer allowed
and raises a message when used for uploads when it's not an API Token"""
def identity(self, request):
# If we're calling into this API on a request, then we want to register
# a callback which will ensure that the response varies based on the
# Authorization header.
request.add_response_callback(add_vary_callback("Authorization"))
request.authentication_method = AuthenticationMethod.BASIC_AUTH
if not request.matched_route:
return None
if request.matched_route.name != "forklift.legacy.file_upload":
return None
credentials = extract_http_basic_credentials(request)
if credentials is None:
return None
username, _password = credentials
# The API Token username is allowed to pass through to the
# MacaroonSecurityPolicy.
if username == "__token__":
return None
raise _format_exc_status(
HTTPForbidden(),
"Username/Password authentication is no longer supported. "
"Migrate to API Tokens or Trusted Publishers instead. "
f"See {request.help_url(_anchor='apitoken')} "
f"and {request.help_url(_anchor='trusted-publishers')}",
)
def forget(self, request, **kw):
# No-op.
return []
def remember(self, request, userid, **kw):
# No-op.
return []
def authenticated_userid(self, request):
raise NotImplementedError
def permits(self, request, context, permission):
raise NotImplementedError
def _permits_for_user_policy(acl, request, context, permission):
# It should only be possible for request.identity to be a UserContext object
# at this point, and we only allow a UserContext in these policies.
# Note that the UserContext object must not have a macaroon, since a macaroon
# is present during an API-token-authenticated request, not a session.
assert isinstance(request.identity, UserContext)
assert request.identity.macaroon is None
# Dispatch to our ACL
# NOTE: These parameters are in a different order than the signature of this method.
res = acl.permits(context, principals_for(request.identity), permission)
# Verify email before you can manage account/projects.
if (
isinstance(res, Allowed)
and not request.identity.user.has_primary_verified_email
and request.matched_route.name
not in {"manage.unverified-account", "accounts.verify-email"}
):
return WarehouseDenied("unverified", reason="unverified_email")
# If our underlying permits allowed this, we will check our 2FA status,
# that might possibly return a reason to deny the request anyways, and if
# it does we'll return that.
if isinstance(res, Allowed):
mfa = _check_for_mfa(request, context)
if mfa is not None:
return mfa
return res
def _check_for_mfa(request, context) -> WarehouseDenied | None:
# It should only be possible for request.identity to be a UserContext object
# at this point, and we only allow a UserContext in these policies.
# Note that the UserContext object must not have a macaroon, since a macaroon
# is present during an API-token-authenticated request, not a session.
assert isinstance(request.identity, UserContext)
assert request.identity.macaroon is None
if request.identity.user.has_two_factor:
# We're good to go!
return None
# Return a different message for upload endpoint first.
if request.matched_route.name == "forklift.legacy.file_upload":
return WarehouseDenied(
"You must enable two factor authentication to upload",
reason="upload_2fa_required",
)
# Management routes that don't require 2FA, mostly to set up 2FA.
_exempt_routes = [
"manage.account.recovery-codes",
"manage.account.totp-provision",
"manage.account.two-factor",
"manage.account.webauthn-provision",
"manage.unverified-account",
"accounts.verify-email",
]
if request.matched_route.name == "manage.account" or any(
request.matched_route.name.startswith(route) for route in _exempt_routes
):
return None
# No exemptions matched, 2FA is required.
return WarehouseDenied(
"You must enable two factor authentication.",
reason="manage_2fa_required",
)
| BasicAuthSecurityPolicy |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py | {
"start": 30640,
"end": 30908
} | class ____(graphene.Union):
"""The output from logging telemetry."""
class Meta:
types = (
GrapheneLogTelemetrySuccess,
GraphenePythonError,
)
name = "LogTelemetryMutationResult"
| GrapheneLogTelemetryMutationResult |
python | google__pytype | pytype/test_data/pytree.py | {
"start": 17740,
"end": 19033
} | class ____(BasePattern):
def __init__(self, type=None, content=None, name=None):
"""Initializer. Takes optional type, content, and name.
The type, if given must be a token type (< 256). If not given,
this matches any *leaf* node; the content may still be required.
The content, if given, must be a string.
If a name is given, the matching node is stored in the results
dict under that key.
"""
if type is not None:
assert 0 <= type < 256, type
if content is not None:
assert isinstance(content, str), repr(content)
self.type = type
self.content = content
self.name = name
def match(self, node, results=None):
"""Override match() to insist on a leaf node."""
if not isinstance(node, Leaf):
return False
return BasePattern.match(self, node, results)
def _submatch(self, node, results=None):
"""Match the pattern's content to the node's children.
This assumes the node type matches and self.content is not None.
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
When returning False, the results dict may still be updated.
"""
return self.content == node.value
| LeafPattern |
python | PrefectHQ__prefect | src/integrations/prefect-kubernetes/tests/test_logging.py | {
"start": 117,
"end": 7275
} | class ____:
"""Tests for the KopfObjectJsonFormatter"""
def test_filters_unserializable_kopf_fields(self):
"""
Test that kopf-specific fields (k8s_skip, k8s_ref, settings) are
filtered out from the log record.
"""
formatter = KopfObjectJsonFormatter()
# Create a log record with kopf-specific fields
record = logging.LogRecord(
name="kopf.test",
level=logging.INFO,
pathname="test.py",
lineno=1,
msg="Test message",
args=(),
exc_info=None,
)
# Add kopf-specific attributes that should be filtered
record.k8s_skip = True # type: ignore
record.k8s_ref = {"kind": "Pod", "name": "test-pod"} # type: ignore
record.settings = {"some": "settings"} # type: ignore
# Format the record
formatted = formatter.format(record)
log_dict = json.loads(formatted)
# Verify kopf fields are NOT in the output
assert "k8s_skip" not in log_dict, "k8s_skip should be filtered out"
assert "settings" not in log_dict, "settings should be filtered out"
def test_adds_severity_field(self):
"""Test that the formatter adds a severity field with correct values"""
formatter = KopfObjectJsonFormatter()
# Test different log levels
test_cases = [
(logging.DEBUG, "debug"),
(logging.INFO, "info"),
(logging.WARNING, "warn"),
(logging.ERROR, "error"),
(logging.CRITICAL, "fatal"),
]
for level, expected_severity in test_cases:
record = logging.LogRecord(
name="kopf.test",
level=level,
pathname="test.py",
lineno=1,
msg=f"Test message at {logging.getLevelName(level)}",
args=(),
exc_info=None,
)
formatted = formatter.format(record)
log_dict = json.loads(formatted)
assert "severity" in log_dict, (
f"severity field should be present for {logging.getLevelName(level)}"
)
assert log_dict["severity"] == expected_severity, (
f"Expected severity '{expected_severity}' for {logging.getLevelName(level)}, "
f"got '{log_dict['severity']}'"
)
def test_adds_kubernetes_object_reference(self):
"""Test that k8s_ref is added to the output under the 'object' key"""
formatter = KopfObjectJsonFormatter()
record = logging.LogRecord(
name="kopf.test",
level=logging.INFO,
pathname="test.py",
lineno=1,
msg="Test message",
args=(),
exc_info=None,
)
# Add k8s_ref attribute
k8s_ref: dict[str, Any] = {
"apiVersion": "v1",
"kind": "Pod",
"name": "test-pod",
"uid": "12345",
"namespace": "default",
}
record.k8s_ref = k8s_ref # type: ignore
formatted = formatter.format(record)
log_dict = json.loads(formatted)
# Verify the object field contains the k8s_ref
assert "object" in log_dict, (
"object field should be present when k8s_ref is set"
)
assert log_dict["object"] == k8s_ref, "object field should contain the k8s_ref"
def test_custom_refkey(self):
"""Test that a custom refkey can be used instead of 'object'"""
formatter = KopfObjectJsonFormatter(refkey="k8s_resource")
record = logging.LogRecord(
name="kopf.test",
level=logging.INFO,
pathname="test.py",
lineno=1,
msg="Test message",
args=(),
exc_info=None,
)
k8s_ref = {"kind": "Pod", "name": "test-pod"}
record.k8s_ref = k8s_ref # type: ignore
formatted = formatter.format(record)
log_dict = json.loads(formatted)
# Verify custom refkey is used
assert "k8s_resource" in log_dict, "Custom refkey should be present"
assert log_dict["k8s_resource"] == k8s_ref
def test_json_output_is_valid(self):
"""Test that the formatter produces valid JSON"""
formatter = KopfObjectJsonFormatter()
record = logging.LogRecord(
name="kopf.test",
level=logging.INFO,
pathname="test.py",
lineno=1,
msg="Test message",
args=(),
exc_info=None,
)
formatted = formatter.format(record)
# Should not raise a JSONDecodeError
log_dict = json.loads(formatted)
# Verify expected fields are present
assert "message" in log_dict, "message field should be present"
assert "severity" in log_dict, "severity field should be present"
assert log_dict["message"] == "Test message"
def test_timestamp_is_included(self):
"""Test that timestamp is included in the output"""
formatter = KopfObjectJsonFormatter()
record = logging.LogRecord(
name="kopf.test",
level=logging.INFO,
pathname="test.py",
lineno=1,
msg="Test message",
args=(),
exc_info=None,
)
formatted = formatter.format(record)
log_dict = json.loads(formatted)
# The formatter should include a timestamp
assert "timestamp" in log_dict, "timestamp field should be present"
def test_log_with_extra_fields(self):
"""Test that extra fields are included in the output"""
formatter = KopfObjectJsonFormatter()
record = logging.LogRecord(
name="kopf.test",
level=logging.INFO,
pathname="test.py",
lineno=1,
msg="Test message",
args=(),
exc_info=None,
)
# Add extra custom field
record.custom_field = "custom_value" # type: ignore
formatted = formatter.format(record)
log_dict = json.loads(formatted)
# Custom field should be present
assert "custom_field" in log_dict
assert log_dict["custom_field"] == "custom_value"
def test_no_k8s_ref_attribute(self):
"""Test that formatter works correctly when k8s_ref is not present"""
formatter = KopfObjectJsonFormatter()
record = logging.LogRecord(
name="kopf.test",
level=logging.INFO,
pathname="test.py",
lineno=1,
msg="Test message",
args=(),
exc_info=None,
)
# Don't set k8s_ref
formatted = formatter.format(record)
log_dict = json.loads(formatted)
# Should work fine, just without the object field
assert "message" in log_dict
assert "severity" in log_dict
# object field should not be present if k8s_ref is not set
assert "object" not in log_dict or log_dict["object"] is None
| TestKopfObjectJsonFormatter |
python | pypa__warehouse | warehouse/classifiers/models.py | {
"start": 199,
"end": 609
} | class ____(db.ModelBase):
__tablename__ = "trove_classifiers"
__tableargs__ = CheckConstraint(
"classifier not ilike 'private ::%'",
name="ck_disallow_private_top_level_classifier",
)
__repr__ = make_repr("classifier")
id: Mapped[int] = mapped_column(primary_key=True)
classifier: Mapped[str | None] = mapped_column(unique=True)
ordering: Mapped[int | None]
| Classifier |
python | pypa__warehouse | warehouse/admin/views/organizations.py | {
"start": 2081,
"end": 3569
} | class ____(wtforms.Form):
display_name = wtforms.StringField(
validators=[
wtforms.validators.InputRequired(
message="Specify organization display name"
),
wtforms.validators.Length(
max=100,
message="Organization display name must be 100 characters or less",
),
]
)
link_url = wtforms.URLField(
validators=[
wtforms.validators.InputRequired(message="Specify organization URL"),
wtforms.validators.Length(
max=400, message="Organization URL must be 400 characters or less"
),
wtforms.validators.Regexp(
r"^https?://",
message="Organization URL must start with http:// or https://",
),
]
)
description = wtforms.TextAreaField(
validators=[
wtforms.validators.InputRequired(
message="Specify organization description"
),
wtforms.validators.Length(
max=400,
message="Organization description must be 400 characters or less",
),
]
)
orgtype = wtforms.SelectField(
choices=[(orgtype.value, orgtype.value) for orgtype in OrganizationType],
coerce=OrganizationType,
validators=[
wtforms.validators.InputRequired(message="Select organization type"),
],
)
| OrganizationForm |
python | marshmallow-code__marshmallow | src/marshmallow/fields.py | {
"start": 69322,
"end": 71325
} | class ____(Field):
"""A field that takes the value returned by a function.
:param serialize: A callable from which to retrieve the value.
The function must take a single argument ``obj`` which is the object
to be serialized.
If no callable is provided then the ```load_only``` flag will be set
to True.
:param deserialize: A callable from which to retrieve the value.
The function must take a single argument ``value`` which is the value
to be deserialized.
If no callable is provided then ```value``` will be passed through
unchanged.
.. versionchanged:: 3.0.0a1
Removed ``func`` parameter.
.. versionchanged:: 4.0.0
Don't pass context to serialization and deserialization functions.
"""
_CHECK_ATTRIBUTE = False
def __init__(
self,
serialize: (
typing.Callable[[typing.Any], typing.Any]
| typing.Callable[[typing.Any, dict], typing.Any]
| None
) = None,
deserialize: (
typing.Callable[[typing.Any], typing.Any]
| typing.Callable[[typing.Any, dict], typing.Any]
| None
) = None,
**kwargs: Unpack[_BaseFieldKwargs], # FIXME: Omit dump_only and load_only
):
# Set dump_only and load_only based on arguments
kwargs["dump_only"] = bool(serialize) and not bool(deserialize)
kwargs["load_only"] = bool(deserialize) and not bool(serialize)
super().__init__(**kwargs)
self.serialize_func = serialize and utils.callable_or_raise(serialize)
self.deserialize_func = deserialize and utils.callable_or_raise(deserialize)
def _serialize(self, value, attr, obj, **kwargs):
return self.serialize_func(obj)
def _deserialize(self, value, attr, data, **kwargs):
if self.deserialize_func:
return self.deserialize_func(value)
return value
_ContantT = typing.TypeVar("_ContantT")
| Function |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_data_validation01.py | {
"start": 315,
"end": 962
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("data_validation01.xlsx")
def test_create_file(self):
"""Test the creation of a XlsxWriter file with data validation."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.data_validation(
"C2",
{
"validate": "list",
"value": ["Foo", "Bar", "Baz"],
},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pypa__pip | src/pip/_vendor/urllib3/contrib/socks.py | {
"start": 2327,
"end": 5042
} | class ____(HTTPConnection):
"""
A plain-text HTTP connection that connects via a SOCKS proxy.
"""
def __init__(self, *args, **kwargs):
self._socks_options = kwargs.pop("_socks_options")
super(SOCKSConnection, self).__init__(*args, **kwargs)
def _new_conn(self):
"""
Establish a new connection via the SOCKS proxy.
"""
extra_kw = {}
if self.source_address:
extra_kw["source_address"] = self.source_address
if self.socket_options:
extra_kw["socket_options"] = self.socket_options
try:
conn = socks.create_connection(
(self.host, self.port),
proxy_type=self._socks_options["socks_version"],
proxy_addr=self._socks_options["proxy_host"],
proxy_port=self._socks_options["proxy_port"],
proxy_username=self._socks_options["username"],
proxy_password=self._socks_options["password"],
proxy_rdns=self._socks_options["rdns"],
timeout=self.timeout,
**extra_kw
)
except SocketTimeout:
raise ConnectTimeoutError(
self,
"Connection to %s timed out. (connect timeout=%s)"
% (self.host, self.timeout),
)
except socks.ProxyError as e:
# This is fragile as hell, but it seems to be the only way to raise
# useful errors here.
if e.socket_err:
error = e.socket_err
if isinstance(error, SocketTimeout):
raise ConnectTimeoutError(
self,
"Connection to %s timed out. (connect timeout=%s)"
% (self.host, self.timeout),
)
else:
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % error
)
else:
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e
)
except SocketError as e: # Defensive: PySocks should catch all these.
raise NewConnectionError(
self, "Failed to establish a new connection: %s" % e
)
return conn
# We don't need to duplicate the Verified/Unverified distinction from
# urllib3/connection.py here because the HTTPSConnection will already have been
# correctly set to either the Verified or Unverified form by that module. This
# means the SOCKSHTTPSConnection will automatically be the correct type.
| SOCKSConnection |
python | mlflow__mlflow | mlflow/types/chat.py | {
"start": 4364,
"end": 4615
} | class ____(BaseModel):
"""
A tool definition passed to the chat completion API.
Ref: https://platform.openai.com/docs/guides/function-calling
"""
type: Literal["function"]
function: FunctionToolDefinition | None = None
| ChatTool |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/result.py | {
"start": 11957,
"end": 28295
} | class ____(InPlaceGenerative, Generic[_R]):
__slots__ = ()
_real_result: Optional[Result[Unpack[TupleAny]]] = None
_generate_rows: bool = True
_row_logging_fn: Optional[Callable[[Any], Any]]
_unique_filter_state: Optional[_UniqueFilterStateType] = None
_post_creational_filter: Optional[Callable[[Any], Any]] = None
_is_cursor = False
_metadata: ResultMetaData
_source_supports_scalars: bool
def _fetchiter_impl(
self,
) -> Iterator[_InterimRowType[Row[Unpack[TupleAny]]]]:
raise NotImplementedError()
def _fetchone_impl(
self, hard_close: bool = False
) -> Optional[_InterimRowType[Row[Unpack[TupleAny]]]]:
raise NotImplementedError()
def _fetchmany_impl(
self, size: Optional[int] = None
) -> List[_InterimRowType[Row[Unpack[TupleAny]]]]:
raise NotImplementedError()
def _fetchall_impl(
self,
) -> List[_InterimRowType[Row[Unpack[TupleAny]]]]:
raise NotImplementedError()
def _soft_close(self, hard: bool = False) -> None:
raise NotImplementedError()
@HasMemoized_ro_memoized_attribute
def _row_getter(self) -> Optional[Callable[..., _R]]:
real_result: Result[Unpack[TupleAny]] = (
self._real_result
if self._real_result
else cast("Result[Unpack[TupleAny]]", self)
)
if real_result._source_supports_scalars:
if not self._generate_rows:
return None
else:
_proc = Row
def process_row(
metadata: ResultMetaData,
processors: Optional[_ProcessorsType],
key_to_index: Dict[_KeyType, int],
scalar_obj: Any,
) -> Row[Unpack[TupleAny]]:
return _proc(
metadata, processors, key_to_index, (scalar_obj,)
)
else:
process_row = Row # type: ignore
metadata = self._metadata
key_to_index = metadata._key_to_index
processors = metadata._effective_processors
tf = metadata._tuplefilter
if tf and not real_result._source_supports_scalars:
if processors:
processors = tf(processors)
_make_row_orig: Callable[..., _R] = functools.partial( # type: ignore # noqa E501
process_row, metadata, processors, key_to_index
)
fixed_tf = tf
def make_row(row: _InterimRowType[Row[Unpack[TupleAny]]]) -> _R:
return _make_row_orig(fixed_tf(row))
else:
make_row = functools.partial( # type: ignore
process_row, metadata, processors, key_to_index
)
if real_result._row_logging_fn:
_log_row = real_result._row_logging_fn
_make_row = make_row
def make_row(row: _InterimRowType[Row[Unpack[TupleAny]]]) -> _R:
return _log_row(_make_row(row)) # type: ignore
return make_row
@HasMemoized_ro_memoized_attribute
def _iterator_getter(self) -> Callable[..., Iterator[_R]]:
make_row = self._row_getter
post_creational_filter = self._post_creational_filter
if self._unique_filter_state:
uniques, strategy = self._unique_strategy
def iterrows(self: Result[Unpack[TupleAny]]) -> Iterator[_R]:
for raw_row in self._fetchiter_impl():
obj: _InterimRowType[Any] = (
make_row(raw_row) if make_row else raw_row
)
hashed = strategy(obj) if strategy else obj
if hashed in uniques:
continue
uniques.add(hashed)
if post_creational_filter:
obj = post_creational_filter(obj)
yield obj # type: ignore
else:
def iterrows(self: Result[Unpack[TupleAny]]) -> Iterator[_R]:
for raw_row in self._fetchiter_impl():
row: _InterimRowType[Any] = (
make_row(raw_row) if make_row else raw_row
)
if post_creational_filter:
row = post_creational_filter(row)
yield row # type: ignore
return iterrows
def _raw_all_rows(self) -> List[_R]:
make_row = self._row_getter
assert make_row is not None
rows = self._fetchall_impl()
return [make_row(row) for row in rows]
def _allrows(self) -> List[_R]:
post_creational_filter = self._post_creational_filter
make_row = self._row_getter
rows = self._fetchall_impl()
made_rows: List[_InterimRowType[_R]]
if make_row:
made_rows = [make_row(row) for row in rows]
else:
made_rows = rows # type: ignore
interim_rows: List[_R]
if self._unique_filter_state:
uniques, strategy = self._unique_strategy
interim_rows = [
made_row # type: ignore
for made_row, sig_row in [
(
made_row,
strategy(made_row) if strategy else made_row,
)
for made_row in made_rows
]
if sig_row not in uniques and not uniques.add(sig_row) # type: ignore # noqa: E501
]
else:
interim_rows = made_rows # type: ignore
if post_creational_filter:
interim_rows = [
post_creational_filter(row) for row in interim_rows
]
return interim_rows
@HasMemoized_ro_memoized_attribute
def _onerow_getter(
self,
) -> Callable[..., Union[Literal[_NoRow._NO_ROW], _R]]:
make_row = self._row_getter
post_creational_filter = self._post_creational_filter
if self._unique_filter_state:
uniques, strategy = self._unique_strategy
def onerow(self: Result[Unpack[TupleAny]]) -> Union[_NoRow, _R]:
_onerow = self._fetchone_impl
while True:
row = _onerow()
if row is None:
return _NO_ROW
else:
obj: _InterimRowType[Any] = (
make_row(row) if make_row else row
)
hashed = strategy(obj) if strategy else obj
if hashed in uniques:
continue
else:
uniques.add(hashed)
if post_creational_filter:
obj = post_creational_filter(obj)
return obj # type: ignore
else:
def onerow(self: Result[Unpack[TupleAny]]) -> Union[_NoRow, _R]:
row = self._fetchone_impl()
if row is None:
return _NO_ROW
else:
interim_row: _InterimRowType[Any] = (
make_row(row) if make_row else row
)
if post_creational_filter:
interim_row = post_creational_filter(interim_row)
return interim_row # type: ignore
return onerow
@HasMemoized_ro_memoized_attribute
def _manyrow_getter(self) -> Callable[..., List[_R]]:
make_row = self._row_getter
post_creational_filter = self._post_creational_filter
if self._unique_filter_state:
uniques, strategy = self._unique_strategy
def filterrows(
make_row: Optional[Callable[..., _R]],
rows: List[Any],
strategy: Optional[Callable[[List[Any]], Any]],
uniques: Set[Any],
) -> List[_R]:
if make_row:
rows = [make_row(row) for row in rows]
if strategy:
made_rows = (
(made_row, strategy(made_row)) for made_row in rows
)
else:
made_rows = ((made_row, made_row) for made_row in rows)
return [
made_row
for made_row, sig_row in made_rows
if sig_row not in uniques and not uniques.add(sig_row) # type: ignore # noqa: E501
]
def manyrows(
self: ResultInternal[_R], num: Optional[int]
) -> List[_R]:
collect: List[_R] = []
_manyrows = self._fetchmany_impl
if num is None:
# if None is passed, we don't know the default
# manyrows number, DBAPI has this as cursor.arraysize
# different DBAPIs / fetch strategies may be different.
# do a fetch to find what the number is. if there are
# only fewer rows left, then it doesn't matter.
real_result = (
self._real_result
if self._real_result
else cast("Result[Unpack[TupleAny]]", self)
)
if real_result._yield_per:
num_required = num = real_result._yield_per
else:
rows = _manyrows(num)
num = len(rows)
assert make_row is not None
collect.extend(
filterrows(make_row, rows, strategy, uniques)
)
num_required = num - len(collect)
else:
num_required = num
assert num is not None
while num_required:
rows = _manyrows(num_required)
if not rows:
break
collect.extend(
filterrows(make_row, rows, strategy, uniques)
)
num_required = num - len(collect)
if post_creational_filter:
collect = [post_creational_filter(row) for row in collect]
return collect
else:
def manyrows(
self: ResultInternal[_R], num: Optional[int]
) -> List[_R]:
if num is None:
real_result = (
self._real_result
if self._real_result
else cast("Result[Unpack[TupleAny]]", self)
)
num = real_result._yield_per
rows: List[_InterimRowType[Any]] = self._fetchmany_impl(num)
if make_row:
rows = [make_row(row) for row in rows]
if post_creational_filter:
rows = [post_creational_filter(row) for row in rows]
return rows # type: ignore
return manyrows
@overload
def _only_one_row(
self: ResultInternal[Row[_T, Unpack[TupleAny]]],
raise_for_second_row: bool,
raise_for_none: bool,
scalar: Literal[True],
) -> _T: ...
@overload
def _only_one_row(
self,
raise_for_second_row: bool,
raise_for_none: Literal[True],
scalar: bool,
) -> _R: ...
@overload
def _only_one_row(
self,
raise_for_second_row: bool,
raise_for_none: bool,
scalar: bool,
) -> Optional[_R]: ...
def _only_one_row(
self,
raise_for_second_row: bool,
raise_for_none: bool,
scalar: bool,
) -> Optional[_R]:
onerow = self._fetchone_impl
row: Optional[_InterimRowType[Any]] = onerow(hard_close=True)
if row is None:
if raise_for_none:
raise exc.NoResultFound(
"No row was found when one was required"
)
else:
return None
if scalar and self._source_supports_scalars:
self._generate_rows = False
make_row = None
else:
make_row = self._row_getter
try:
row = make_row(row) if make_row else row
except:
self._soft_close(hard=True)
raise
if raise_for_second_row:
if self._unique_filter_state:
# for no second row but uniqueness, need to essentially
# consume the entire result :(
uniques, strategy = self._unique_strategy
existing_row_hash = strategy(row) if strategy else row
while True:
next_row: Any = onerow(hard_close=True)
if next_row is None:
next_row = _NO_ROW
break
try:
next_row = make_row(next_row) if make_row else next_row
if strategy:
assert next_row is not _NO_ROW
if existing_row_hash == strategy(next_row):
continue
elif row == next_row:
continue
# here, we have a row and it's different
break
except:
self._soft_close(hard=True)
raise
else:
next_row = onerow(hard_close=True)
if next_row is None:
next_row = _NO_ROW
if next_row is not _NO_ROW:
self._soft_close(hard=True)
raise exc.MultipleResultsFound(
"Multiple rows were found when exactly one was required"
if raise_for_none
else "Multiple rows were found when one or none "
"was required"
)
else:
# if we checked for second row then that would have
# closed us :)
self._soft_close(hard=True)
if not scalar:
post_creational_filter = self._post_creational_filter
if post_creational_filter:
row = post_creational_filter(row)
if scalar and make_row:
return row[0] # type: ignore
else:
return row # type: ignore
def _iter_impl(self) -> Iterator[_R]:
return self._iterator_getter(self)
def _next_impl(self) -> _R:
row = self._onerow_getter(self)
if row is _NO_ROW:
raise StopIteration()
else:
return row
@_generative
def _column_slices(self, indexes: Sequence[_KeyIndexType]) -> Self:
real_result = (
self._real_result
if self._real_result
else cast("Result[Any]", self)
)
if not real_result._source_supports_scalars or len(indexes) != 1:
self._metadata = self._metadata._reduce(indexes)
assert self._generate_rows
return self
@HasMemoized.memoized_attribute
def _unique_strategy(self) -> _UniqueFilterStateType:
assert self._unique_filter_state is not None
uniques, strategy = self._unique_filter_state
real_result = (
self._real_result
if self._real_result is not None
else cast("Result[Unpack[TupleAny]]", self)
)
if not strategy and self._metadata._unique_filters:
if (
real_result._source_supports_scalars
and not self._generate_rows
):
strategy = self._metadata._unique_filters[0]
else:
filters = self._metadata._unique_filters
if self._metadata._tuplefilter:
filters = self._metadata._tuplefilter(filters)
strategy = operator.methodcaller("_filter_on_values", filters)
return uniques, strategy
| ResultInternal |
python | astropy__astropy | astropy/units/tests/test_quantity_non_ufuncs.py | {
"start": 2883,
"end": 3276
} | class ____(BasicTestSetup):
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, *kwargs)
assert type(out) is type(expected)
if isinstance(expected, tuple):
assert all(np.all(o == x) for o, x in zip(out, expected))
else:
assert np.all(out == expected)
| NoUnitTestSetup |
python | chroma-core__chroma | chromadb/errors.py | {
"start": 2296,
"end": 2501
} | class ____(ChromaError):
@overrides
def code(self) -> int:
return 413
@classmethod
@overrides
def name(cls) -> str:
return "BatchSizeExceededError"
| BatchSizeExceededError |
python | getsentry__sentry | tests/sentry/dynamic_sampling/tasks/test_custom_rule_notifications.py | {
"start": 509,
"end": 4198
} | class ____(TestCase, SnubaTestCase):
def create_transaction(self) -> Event:
data = load_data("transaction")
return self.store_event(data, project_id=self.project.id)
def setUp(self) -> None:
super().setUp()
self.user = self.create_user(email="radu@sentry.io", username="raduw", name="RaduW")
now = datetime.now(timezone.utc) - timedelta(minutes=2)
condition = {
"op": "and",
"inner": [
{"op": "eq", "name": "event.environment", "value": "dev"},
{"op": "eq", "name": "event.tags.event.type", "value": "transaction"},
],
}
query = "event.type:transaction environment:dev"
self.rule = CustomDynamicSamplingRule.update_or_create(
condition=condition,
start=now,
end=now + timedelta(days=1),
project_ids=[],
organization_id=self.organization.id,
num_samples=100,
sample_rate=0.5,
query=query,
created_by_id=self.user.id,
)
def test_get_num_samples(self) -> None:
"""
Tests that the num_samples function returns the correct number of samples
"""
# We cannot query the discover_transactions entity without a project being defined for the org
self.create_project()
num_samples = get_num_samples(self.rule)
assert num_samples == 0
self.create_transaction()
self.create_transaction()
self.create_transaction()
num_samples = get_num_samples(self.rule)
assert num_samples == 3
@mock.patch("sentry.dynamic_sampling.tasks.custom_rule_notifications.send_notification")
def test_email_is_sent_when_enough_samples_have_been_collected(
self, send_notification_mock: mock.MagicMock
) -> None:
for idx in range(MIN_SAMPLES_FOR_NOTIFICATION):
self.create_transaction()
# (RaduW) not sure why I need this, store_event seems to take a while
time.sleep(1.0)
# the rule should not have notified anybody yet
self.rule.refresh_from_db()
assert not self.rule.notification_sent
# we have enough samples now so an email should be sent
with self.tasks():
custom_rule_notifications()
# test we sent an email
send_notification_mock.assert_called_once()
# test the rule was marked as notification_sent
self.rule.refresh_from_db()
assert self.rule.notification_sent
def test_clean_custom_rule_notifications(self) -> None:
"""
Tests that expired rules are deactivated
"""
# create an expired rule
start = datetime.now(timezone.utc) - timedelta(hours=2)
end = datetime.now(timezone.utc) - timedelta(minutes=2)
condition = {"op": "eq", "name": "event.tags.event.type", "value": "transaction"}
query = "event.type:transaction"
expired_rule = CustomDynamicSamplingRule.update_or_create(
condition=condition,
start=start,
end=end,
project_ids=[],
organization_id=self.organization.id,
num_samples=100,
sample_rate=0.5,
query=query,
created_by_id=self.user.id,
)
# not expired yet
assert expired_rule.is_active
assert self.rule.is_active
with self.tasks():
clean_custom_rule_notifications()
self.rule.refresh_from_db()
assert self.rule.is_active
expired_rule.refresh_from_db()
assert not expired_rule.is_active
| CustomRuleNotificationsTest |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/declarative_automation_tests/scenario_utils/asset_daemon_scenario.py | {
"start": 2270,
"end": 4400
} | class ____(NamedTuple):
"""Provides a convenient way to specify information about an AutoMaterializeRuleEvaluation
that is expected to exist within the context of a test.
Args:
rule (AutoMaterializeRule): The rule that will exist on the evaluation.
partitions (Optional[Sequence[str]]): The partition keys that this rule evaluation will
apply to.
rule_evaluation_data (Optional[AutoMaterializeRuleEvaluationData]): The specific rule
evaluation data that will exist on the evaluation.
"""
rule: dg.AutoMaterializeRule
partitions: Optional[Sequence[str]] = None
rule_evaluation_data: Optional[AutoMaterializeRuleEvaluationData] = None
def with_rule_evaluation_data(
self, data_type: type[AutoMaterializeRuleEvaluationData], **kwargs
) -> "AssetRuleEvaluationSpec":
"""Adds rule evaluation data of the given type to this spec. Formats keyword which are sets
of CoercibleToAssetKey into frozensets of AssetKey for convenience.
"""
transformed_kwargs = {
key: frozenset(AssetKey.from_coercible(v) for v in value)
if isinstance(value, set)
else value
for key, value in kwargs.items()
}
return self._replace(
rule_evaluation_data=data_type(**transformed_kwargs),
)
def resolve(self, asset_key: AssetKey, asset_graph: BaseAssetGraph) -> AssetSubsetWithMetadata:
"""Returns a tuple of the resolved AutoMaterializeRuleEvaluation for this spec and the
partitions that it applies to.
"""
subset = ValidAssetSubset.from_asset_partitions_set(
asset_key,
asset_graph.get(asset_key).partitions_def,
{
AssetKeyPartitionKey(asset_key, partition_key)
for partition_key in self.partitions or [None]
},
)
metadata = self.rule_evaluation_data.metadata if self.rule_evaluation_data else {}
return AssetSubsetWithMetadata(subset=subset, metadata=metadata)
@dataclass(frozen=True)
| AssetRuleEvaluationSpec |
python | readthedocs__readthedocs.org | readthedocs/api/v3/views.py | {
"start": 17218,
"end": 17901
} | class ____(
APIv3Settings,
NestedViewSetMixin,
ProjectQuerySetMixin,
FlexFieldsMixin,
ListModelMixin,
RetrieveModelMixin,
UpdateMixin,
UpdateModelMixin,
GenericViewSet,
):
model = Notification
lookup_field = "pk"
lookup_url_kwarg = "notification_pk"
serializer_class = NotificationSerializer
filterset_class = NotificationFilter
# We don't want to show notifications to users that don't have admin access to the project.
permission_classes = [IsAuthenticated & IsProjectAdmin]
def get_queryset(self):
project = self._get_parent_project()
return project.notifications.all()
| NotificationsProjectViewSet |
python | great-expectations__great_expectations | tests/datasource/fluent/test_pandas_google_cloud_storage_datasource.py | {
"start": 924,
"end": 8101
} | class ____:
# noinspection PyMethodMayBeStatic,PyUnusedLocal
def list_blobs(
self,
bucket_or_name,
max_results=None,
prefix=None,
delimiter=None,
**kwargs,
) -> Iterator:
return iter([])
def _build_pandas_gcs_datasource(
gcs_options: Dict[str, Any] | None = None,
) -> PandasGoogleCloudStorageDatasource:
gcs_client: google.Client = cast("google.Client", MockGCSClient())
pandas_gcs_datasource = PandasGoogleCloudStorageDatasource(
name="pandas_gcs_datasource",
bucket_or_name="test_bucket",
gcs_options=gcs_options or {},
)
pandas_gcs_datasource._gcs_client = gcs_client
return pandas_gcs_datasource
@pytest.fixture
def pandas_gcs_datasource() -> PandasGoogleCloudStorageDatasource:
pandas_gcs_datasource: PandasGoogleCloudStorageDatasource = _build_pandas_gcs_datasource()
return pandas_gcs_datasource
@pytest.fixture
def object_keys() -> List[str]:
return [
"alex_20200809_1000.csv",
"eugene_20200809_1500.csv",
"james_20200811_1009.csv",
"abe_20200809_1040.csv",
"will_20200809_1002.csv",
"james_20200713_1567.csv",
"eugene_20201129_1900.csv",
"will_20200810_1001.csv",
"james_20200810_1003.csv",
"alex_20200819_1300.csv",
]
@pytest.mark.unit
def test_construct_pandas_gcs_datasource_without_gcs_options():
google_cred_file = os.getenv("GOOGLE_APPLICATION_CREDENTIALS")
if not google_cred_file:
pytest.skip('No "GOOGLE_APPLICATION_CREDENTIALS" environment variable found.')
pandas_gcs_datasource = PandasGoogleCloudStorageDatasource(
name="pandas_gcs_datasource",
bucket_or_name="test_bucket",
gcs_options={},
)
gcs_client: google.Client = pandas_gcs_datasource._get_gcs_client()
assert gcs_client is not None
assert pandas_gcs_datasource.name == "pandas_gcs_datasource"
@pytest.mark.unit
@mock.patch(
"great_expectations.datasource.fluent.data_asset.data_connector.google_cloud_storage_data_connector.list_gcs_keys"
)
@mock.patch("google.oauth2.service_account.Credentials.from_service_account_file")
@mock.patch("google.cloud.storage.Client")
def test_construct_pandas_gcs_datasource_with_filename_in_gcs_options(
mock_gcs_client, mock_gcs_service_account_credentials, mock_list_keys
):
pandas_gcs_datasource = PandasGoogleCloudStorageDatasource(
name="pandas_gcs_datasource",
bucket_or_name="test_bucket",
gcs_options={
"filename": "my_filename.csv",
},
)
gcs_client: google.Client = pandas_gcs_datasource._get_gcs_client()
assert gcs_client is not None
assert pandas_gcs_datasource.name == "pandas_gcs_datasource"
@pytest.mark.unit
@mock.patch(
"great_expectations.datasource.fluent.data_asset.data_connector.google_cloud_storage_data_connector.list_gcs_keys"
)
@mock.patch("google.oauth2.service_account.Credentials.from_service_account_info")
@mock.patch("google.cloud.storage.Client")
def test_construct_pandas_gcs_datasource_with_info_in_gcs_options(
mock_gcs_client, mock_gcs_service_account_credentials, mock_list_keys
):
pandas_gcs_datasource = PandasGoogleCloudStorageDatasource(
name="pandas_gcs_datasource",
bucket_or_name="test_bucket",
gcs_options={
"info": "{my_csv: my_content,}",
},
)
gcs_client: google.Client = pandas_gcs_datasource._get_gcs_client()
assert gcs_client is not None
assert pandas_gcs_datasource.name == "pandas_gcs_datasource"
@pytest.mark.unit
@mock.patch(
"great_expectations.datasource.fluent.data_asset.data_connector.google_cloud_storage_data_connector.list_gcs_keys"
)
@mock.patch("google.cloud.storage.Client")
def test_add_csv_asset_to_datasource(
mock_gcs_client,
mock_list_keys,
object_keys: List[str],
pandas_gcs_datasource: PandasGoogleCloudStorageDatasource,
):
mock_list_keys.return_value = object_keys
asset = pandas_gcs_datasource.add_csv_asset(
name="csv_asset",
)
assert asset.name == "csv_asset"
@pytest.mark.unit
@mock.patch(
"great_expectations.datasource.fluent.data_asset.data_connector.google_cloud_storage_data_connector.list_gcs_keys"
)
@mock.patch("google.cloud.storage.Client")
def test_construct_csv_asset_directly(mock_gcs_client, mock_list_keys, object_keys: List[str]):
mock_list_keys.return_value = object_keys
asset = CSVAsset( # type: ignore[call-arg] # FIXME CoP
name="csv_asset",
)
assert asset.name == "csv_asset"
@pytest.mark.unit
@mock.patch(
"great_expectations.datasource.fluent.data_asset.data_connector.google_cloud_storage_data_connector.list_gcs_keys"
)
@mock.patch("google.cloud.storage.Client")
def test_csv_asset_with_batching_regex_named_parameters(
mock_gcs_client,
mock_list_keys,
object_keys: List[str],
pandas_gcs_datasource: PandasGoogleCloudStorageDatasource,
):
mock_list_keys.return_value = object_keys
asset = pandas_gcs_datasource.add_csv_asset(
name="csv_asset",
)
batching_regex = r"yellow_tripdata_sample_(?P<year>\d{4})-(?P<month>\d{2})\.csv"
batch_def = asset.add_batch_definition_monthly(name="batch def", regex=batching_regex)
options = asset.get_batch_parameters_keys(partitioner=batch_def.partitioner)
assert options == ("path", "year", "month")
@pytest.mark.unit
@mock.patch(
"great_expectations.datasource.fluent.data_asset.data_connector.google_cloud_storage_data_connector.list_gcs_keys"
)
@mock.patch("google.cloud.storage.Client")
def test_csv_asset_with_non_string_batching_regex_named_parameters(
mock_gcs_client,
mock_list_keys,
object_keys: List[str],
pandas_gcs_datasource: PandasGoogleCloudStorageDatasource,
):
mock_list_keys.return_value = object_keys
asset = pandas_gcs_datasource.add_csv_asset(
name="csv_asset",
)
with pytest.raises(ge_exceptions.InvalidBatchRequestError):
# price is an int which will raise an error
asset.build_batch_request({"name": "alex", "timestamp": "1234567890", "price": 1300})
@pytest.mark.unit
@mock.patch(
"great_expectations.datasource.fluent.data_asset.data_connector.google_cloud_storage_data_connector.list_gcs_keys"
)
@mock.patch("google.cloud.storage.Client")
def test_add_csv_asset_with_recursive_file_discovery_to_datasource(
mock_gcs_client,
mock_list_keys,
object_keys: List[str],
pandas_gcs_datasource: PandasGoogleCloudStorageDatasource,
):
"""
Tests that the gcs_recursive_file_discovery-flag is passed on
to the list_keys-function as the recursive-parameter
This makes the list_keys-function search and return files also
from sub-directories on GCS, not just the files in the folder
specified with the abs_name_starts_with-parameter
"""
mock_list_keys.return_value = object_keys
pandas_gcs_datasource.add_csv_asset(
name="csv_asset",
gcs_recursive_file_discovery=True,
)
assert "recursive" in mock_list_keys.call_args.kwargs
assert mock_list_keys.call_args.kwargs["recursive"] is True
| MockGCSClient |
python | Netflix__metaflow | test/data/__init__.py | {
"start": 398,
"end": 527
} | class ____(FlowSpec):
def __init__(self, name="FakeFlow", use_cli=False):
self.name = name
DO_TEST_RUN = False
| FakeFlow |
python | tensorflow__tensorflow | tensorflow/python/keras/losses.py | {
"start": 23124,
"end": 26361
} | class ____(LossFunctionWrapper):
"""Computes the crossentropy loss between the labels and predictions.
Use this crossentropy loss function when there are two or more label classes.
We expect labels to be provided in a `one_hot` representation. If you want to
provide labels as integers, please use `SparseCategoricalCrossentropy` loss.
There should be `# classes` floating point values per feature.
In the snippet below, there is `# classes` floating pointing values per
example. The shape of both `y_pred` and `y_true` are
`[batch_size, num_classes]`.
Standalone usage:
>>> y_true = [[0, 1, 0], [0, 0, 1]]
>>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> cce = tf.keras.losses.CategoricalCrossentropy()
>>> cce(y_true, y_pred).numpy()
1.177
>>> # Calling with 'sample_weight'.
>>> cce(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7])).numpy()
0.814
>>> # Using 'sum' reduction type.
>>> cce = tf.keras.losses.CategoricalCrossentropy(
... reduction=tf.keras.losses.Reduction.SUM)
>>> cce(y_true, y_pred).numpy()
2.354
>>> # Using 'none' reduction type.
>>> cce = tf.keras.losses.CategoricalCrossentropy(
... reduction=tf.keras.losses.Reduction.NONE)
>>> cce(y_true, y_pred).numpy()
array([0.0513, 2.303], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', loss=tf.keras.losses.CategoricalCrossentropy())
```
"""
def __init__(self,
from_logits=False,
label_smoothing=0,
axis=-1,
reduction=losses_utils.ReductionV2.AUTO,
name='categorical_crossentropy'):
"""Initializes `CategoricalCrossentropy` instance.
Args:
from_logits: Whether `y_pred` is expected to be a logits tensor. By
default, we assume that `y_pred` encodes a probability distribution.
label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
meaning the confidence on label values are relaxed. For example, if
`0.1`, use `0.1 / num_classes` for non-target labels and
`0.9 + 0.1 / num_classes` for target labels.
axis: The axis along which to compute crossentropy (the features axis).
Defaults to -1.
reduction: Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the instance.
Defaults to 'categorical_crossentropy'.
"""
super().__init__(
categorical_crossentropy,
name=name,
reduction=reduction,
from_logits=from_logits,
label_smoothing=label_smoothing,
axis=axis)
| CategoricalCrossentropy |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefault2.py | {
"start": 1484,
"end": 1588
} | class ____[*Ts = T1]: ...
# This should generate an error because default must be unpacked tuple.
| ClassTs5 |
python | kamyu104__LeetCode-Solutions | Python/relative-sort-array.py | {
"start": 33,
"end": 334
} | class ____(object):
def relativeSortArray(self, arr1, arr2):
"""
:type arr1: List[int]
:type arr2: List[int]
:rtype: List[int]
"""
lookup = {v: i for i, v in enumerate(arr2)}
return sorted(arr1, key=lambda i: lookup.get(i, len(arr2)+i))
| Solution |
python | spulec__freezegun | tests/test_datetimes.py | {
"start": 21239,
"end": 21569
} | class ____(BaseInheritanceFreezableTests):
def test_time_is_not_frozen(self) -> None:
# In this class, time should not be frozen - and the below decorated
# class shouldn't affect that
self.assertNotEqual(datetime.date(2013, 4, 9), datetime.date.today())
@freeze_time('2013-04-09')
| UnfrozenInheritedTests |
python | facebook__pyre-check | client/commands/server_event.py | {
"start": 2924,
"end": 3486
} | class ____(Exception):
kind: ErrorKind
def __init__(self, exception_event: ServerException) -> None:
super().__init__(exception_event.message)
self.kind = exception_event.kind
def _parse_server_event(event_string: str) -> Event:
event = create_from_string(event_string)
if event is None:
raise EventParsingException(
f"Unrecognized status update from server: {event_string}"
)
elif isinstance(event, ServerException):
raise ServerStartException(event)
return event
| ServerStartException |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_connections.py | {
"start": 2710,
"end": 3102
} | class ____:
@pytest.fixture(autouse=True)
def setup(self) -> None:
clear_test_connections(False)
clear_db_connections(False)
clear_db_logs()
def teardown_method(self) -> None:
clear_db_connections()
def create_connection(self):
_create_connection()
def create_connections(self):
_create_connections()
| TestConnectionEndpoint |
python | pytorch__pytorch | torchgen/api/autograd.py | {
"start": 2153,
"end": 3279
} | class ____:
# The formula string (legit C++ expression).
# Note that special keywords such as "linear" or "element_wise" have been
# replaced by the automatically generated formula.
formula: str
# Name of the output arguments for which this formula calculates forward
# derivatives
var_names: tuple[str, ...]
# Type of the output arguments for which this formula calculates forward
# derivatives
var_types: tuple[Type, ...]
# Inputs for which the forward derivatives are required for this formula
required_inputs_fw_grad: tuple[str, ...] | None
# Inputs for which the primal is required for this formula
required_inputs_primal: tuple[str, ...] | None
# Flag to specify if this formula requires the original value of self
# This is only used by inplace operations
required_original_self_value: bool
# If this formula is specified in derivatives.yaml or if we are reusing the
# out of place formula for inplace
is_reusing_outplace_formula: bool
# Represents differentiability info for a NativeFunction.
@dataclass(frozen=True)
| ForwardDerivative |
python | chroma-core__chroma | chromadb/test/property/strategies.py | {
"start": 2873,
"end": 3155
} | class ____(TypedDict):
"""
Represents the internal state of a state machine in hypothesis tests.
"""
ids: List[types.ID]
embeddings: types.Embeddings
metadatas: List[Optional[types.Metadata]]
documents: List[Optional[types.Document]]
| StateMachineRecordSet |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 9609,
"end": 9787
} | class ____(_ConfigUpdateModel):
bm25: Optional[_BM25ConfigUpdate]
cleanupIntervalSeconds: Optional[int]
stopwords: Optional[_StopwordsUpdate]
| _InvertedIndexConfigUpdate |
python | pytorch__pytorch | torch/distributions/one_hot_categorical.py | {
"start": 4433,
"end": 5028
} | class ____(OneHotCategorical):
r"""
Creates a reparameterizable :class:`OneHotCategorical` distribution based on the straight-
through gradient estimator from [1].
[1] Estimating or Propagating Gradients Through Stochastic Neurons for Conditional Computation
(Bengio et al., 2013)
"""
has_rsample = True
def rsample(self, sample_shape: _size = torch.Size()) -> Tensor:
samples = self.sample(sample_shape)
probs = self._categorical.probs # cached via @lazy_property
return samples + (probs - probs.detach())
| OneHotCategoricalStraightThrough |
python | apache__airflow | providers/mysql/tests/unit/mysql/hooks/test_mysql.py | {
"start": 12236,
"end": 12579
} | class ____:
DEFAULT_AUTOCOMMIT = "default"
def __init__(self):
self._autocommit = self.DEFAULT_AUTOCOMMIT
@property
def autocommit(self):
return self._autocommit
@autocommit.setter
def autocommit(self, autocommit):
self._autocommit = autocommit
@pytest.mark.db_test
| MockMySQLConnectorConnection |
python | ray-project__ray | python/ray/_private/services.py | {
"start": 7955,
"end": 93871
} | class ____(subprocess.Popen):
if sys.platform == "win32":
def terminate(self):
if isinstance(self.stdin, io.IOBase):
self.stdin.close()
if self._use_signals:
self.send_signal(signal.CTRL_BREAK_EVENT)
else:
super(ConsolePopen, self).terminate()
def __init__(self, *args, **kwargs):
# CREATE_NEW_PROCESS_GROUP is used to send Ctrl+C on Windows:
# https://docs.python.org/3/library/subprocess.html#subprocess.Popen.send_signal
new_pgroup = subprocess.CREATE_NEW_PROCESS_GROUP
flags_to_add = 0
if ray._private.utils.detect_fate_sharing_support():
# If we don't have kernel-mode fate-sharing, then don't do this
# because our children need to be in out process group for
# the process reaper to properly terminate them.
flags_to_add = new_pgroup
flags_key = "creationflags"
if flags_to_add:
kwargs[flags_key] = (kwargs.get(flags_key) or 0) | flags_to_add
self._use_signals = kwargs[flags_key] & new_pgroup
super(ConsolePopen, self).__init__(*args, **kwargs)
def _find_address_from_flag(flag: str):
"""
Attempts to find all valid Ray addresses on this node, specified by the
flag.
Params:
flag: `--redis-address` or `--gcs-address`
Returns:
Set of detected addresses.
"""
# Using Redis address `--redis-address` as an example:
# Currently, this extracts the deprecated --redis-address from the command
# that launched the raylet running on this node, if any. Anyone looking to
# edit this function should be warned that these commands look like, for
# example:
# /usr/local/lib/python3.8/dist-packages/ray/core/src/ray/raylet/raylet
# --redis_address=123.456.78.910 --node_ip_address=123.456.78.910
# --raylet_socket_name=... --store_socket_name=... --object_manager_port=0
# --min_worker_port=10000 --max_worker_port=19999
# --node_manager_port=58578 --redis_port=6379
# --maximum_startup_concurrency=8
# --static_resource_list=node:123.456.78.910,1.0,object_store_memory,66
# --config_list=plasma_store_as_thread,True
# --python_worker_command=/usr/bin/python
# /usr/local/lib/python3.8/dist-packages/ray/workers/default_worker.py
# --redis-address=123.456.78.910:6379
# --node-ip-address=123.456.78.910 --node-manager-port=58578
# --object-store-name=... --raylet-name=...
# --temp-dir=/tmp/ray
# --metrics-agent-port=41856 --redis-password=[MASKED]
# --java_worker_command= --cpp_worker_command=
# --redis_password=[MASKED] --temp_dir=/tmp/ray --session_dir=...
# --metrics-agent-port=41856 --metrics_export_port=64229
# --dashboard_agent_command=/usr/bin/python
# -u /usr/local/lib/python3.8/dist-packages/ray/dashboard/agent.py
# --redis-address=123.456.78.910:6379 --metrics-export-port=64229
# --dashboard-agent-port=41856 --node-manager-port=58578
# --object-store-name=... --raylet-name=... --temp-dir=/tmp/ray
# --log-dir=/tmp/ray/session_2020-11-08_14-29-07_199128_278000/logs
# --redis-password=[MASKED] --object_store_memory=5037192806
# --plasma_directory=/tmp
# Longer arguments are elided with ... but all arguments from this instance
# are included, to provide a sense of what is in these.
# Indeed, we had to pull --redis-address to the front of each call to make
# this readable.
# As you can see, this is very long and complex, which is why we can't
# simply extract all the arguments using regular expressions and
# present a dict as if we never lost track of these arguments, for
# example. Picking out --redis-address below looks like it might grab the
# wrong thing, but double-checking that we're finding the correct process
# by checking that the contents look like we expect would probably be prone
# to choking in unexpected ways.
# Notice that --redis-address appears twice. This is not a copy-paste
# error; this is the reason why the for loop below attempts to pick out
# every appearance of --redis-address.
# The --redis-address here is what is now called the --address, but it
# appears in the default_worker.py and agent.py calls as --redis-address.
addresses = set()
for proc in psutil.process_iter(["cmdline"]):
try:
# HACK: Workaround for UNIX idiosyncrasy
# Normally, cmdline() is supposed to return the argument list.
# But it in some cases (such as when setproctitle is called),
# an arbitrary string resembling a command-line is stored in
# the first argument.
# Explanation: https://unix.stackexchange.com/a/432681
# More info: https://github.com/giampaolo/psutil/issues/1179
cmdline = proc.info["cmdline"]
# NOTE(kfstorm): To support Windows, we can't use
# `os.path.basename(cmdline[0]) == "raylet"` here.
if _is_raylet_process(cmdline):
for arglist in cmdline:
# Given we're merely seeking --redis-address, we just split
# every argument on spaces for now.
for arg in arglist.split(" "):
# TODO(ekl): Find a robust solution for locating Redis.
if arg.startswith(flag):
proc_addr = arg.split("=")[1]
# TODO(mwtian): remove this workaround after Ray
# no longer sets --redis-address to None.
if proc_addr != "" and proc_addr != "None":
addresses.add(proc_addr)
except psutil.AccessDenied:
pass
except psutil.NoSuchProcess:
pass
return addresses
def find_gcs_addresses():
"""Finds any local GCS processes based on grepping ps."""
return _find_address_from_flag("--gcs-address")
def find_bootstrap_address(temp_dir: Optional[str]):
"""Finds the latest Ray cluster address to connect to, if any. This is the
GCS address connected to by the last successful `ray start`."""
return ray._private.utils.read_ray_address(temp_dir)
def get_ray_address_from_environment(addr: str, temp_dir: Optional[str]):
"""Attempts to find the address of Ray cluster to use, in this order:
1. Use RAY_ADDRESS if defined and nonempty.
2. If no address is provided or the provided address is "auto", use the
address in /tmp/ray/ray_current_cluster if available. This will error if
the specified address is None and there is no address found. For "auto",
we will fallback to connecting to any detected Ray cluster (legacy).
3. Otherwise, use the provided address.
Returns:
A string to pass into `ray.init(address=...)`, e.g. ip:port, `auto`.
"""
env_addr = os.environ.get(ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE)
if env_addr is not None and env_addr != "":
addr = env_addr
if addr is not None and addr != "auto":
return addr
# We should try to automatically find an active local instance.
gcs_addrs = find_gcs_addresses()
bootstrap_addr = find_bootstrap_address(temp_dir)
if len(gcs_addrs) > 1 and bootstrap_addr is not None:
logger.warning(
f"Found multiple active Ray instances: {gcs_addrs}. "
f"Connecting to latest cluster at {bootstrap_addr}. "
"You can override this by setting the `--address` flag "
"or `RAY_ADDRESS` environment variable."
)
elif len(gcs_addrs) > 0 and addr == "auto":
# Preserve legacy "auto" behavior of connecting to any cluster, even if not
# started with ray start. However if addr is None, we will raise an error.
bootstrap_addr = list(gcs_addrs).pop()
if bootstrap_addr is None:
if addr is None:
# Caller should start a new instance.
return None
else:
raise ConnectionError(
"Could not find any running Ray instance. "
"Please specify the one to connect to by setting `--address` flag "
"or `RAY_ADDRESS` environment variable."
)
return bootstrap_addr
def wait_for_node(
gcs_address: str,
node_plasma_store_socket_name: str,
timeout: int = _timeout,
):
"""Wait until this node has appeared in the client table.
NOTE: Makes an RPC to the GCS up to every 0.1 seconds to
get all node info. Use only for testing.
Args:
gcs_address: The gcs address
node_plasma_store_socket_name: The
plasma_store_socket_name for the given node which we wait for.
timeout: The amount of time in seconds to wait before raising an
exception.
Raises:
TimeoutError: An exception is raised if the timeout expires before
the node appears in the client table.
"""
gcs_options = GcsClientOptions.create(
gcs_address, None, allow_cluster_id_nil=True, fetch_cluster_id_if_nil=False
)
global_state = ray._private.state.GlobalState()
global_state._initialize_global_state(gcs_options)
start_time = time.time()
while time.time() - start_time < timeout:
clients = global_state.node_table()
object_store_socket_names = [
client["ObjectStoreSocketName"] for client in clients
]
if node_plasma_store_socket_name in object_store_socket_names:
return
else:
time.sleep(0.1)
raise TimeoutError(
f"Timed out after {timeout} seconds while waiting for node to startup. "
f"Did not find socket name {node_plasma_store_socket_name} in the list "
"of object store socket names."
)
def get_node_to_connect_for_driver(gcs_address, node_ip_address):
# Get node table from global state accessor.
global_state = ray._private.state.GlobalState()
gcs_options = _get_gcs_client_options(gcs_address)
global_state._initialize_global_state(gcs_options)
return global_state.get_node_to_connect_for_driver(node_ip_address)
def get_node(gcs_address, node_id):
"""
Get the node information from the global state accessor.
"""
global_state = ray._private.state.GlobalState()
gcs_options = _get_gcs_client_options(gcs_address)
global_state._initialize_global_state(gcs_options)
return global_state.get_node(node_id)
def get_webui_url_from_internal_kv():
assert ray.experimental.internal_kv._internal_kv_initialized()
webui_url = ray.experimental.internal_kv._internal_kv_get(
"webui:url", namespace=ray_constants.KV_NAMESPACE_DASHBOARD
)
return ray._common.utils.decode(webui_url) if webui_url is not None else None
def remaining_processes_alive():
"""See if the remaining processes are alive or not.
Note that this ignores processes that have been explicitly killed,
e.g., via a command like node.kill_raylet().
Returns:
True if the remaining processes started by ray.init() are alive and
False otherwise.
Raises:
Exception: An exception is raised if the processes were not started by
ray.init().
"""
if ray._private.worker._global_node is None:
raise RuntimeError(
"This process is not in a position to determine "
"whether all processes are alive or not."
)
return ray._private.worker._global_node.remaining_processes_alive()
def canonicalize_bootstrap_address(
addr: str, temp_dir: Optional[str] = None
) -> Optional[str]:
"""Canonicalizes Ray cluster bootstrap address to host:port.
Reads address from the environment if needed.
This function should be used to process user supplied Ray cluster address,
via ray.init() or `--address` flags, before using the address to connect.
Returns:
Ray cluster address string in <host:port> format or None if the caller
should start a local Ray instance.
"""
if addr is None or addr == "auto":
addr = get_ray_address_from_environment(addr, temp_dir)
if addr is None or addr == "local":
return None
parsed = parse_address(addr)
if parsed is None:
raise ValueError(f"Invalid address format: {addr}")
host, port = parsed
try:
bootstrap_host = resolve_ip_for_localhost(host)
except Exception:
logger.exception(f"Failed to convert {addr} to host:port")
raise
return build_address(bootstrap_host, port)
def canonicalize_bootstrap_address_or_die(
addr: str, temp_dir: Optional[str] = None
) -> str:
"""Canonicalizes Ray cluster bootstrap address to host:port.
This function should be used when the caller expects there to be an active
and local Ray instance. If no address is provided or address="auto", this
will autodetect the latest Ray instance created with `ray start`.
For convenience, if no address can be autodetected, this function will also
look for any running local GCS processes, based on pgrep output. This is to
allow easier use of Ray CLIs when debugging a local Ray instance (whose GCS
addresses are not recorded).
Returns:
Ray cluster address string in <host:port> format. Throws a
ConnectionError if zero or multiple active Ray instances are
autodetected.
"""
bootstrap_addr = canonicalize_bootstrap_address(addr, temp_dir=temp_dir)
if bootstrap_addr is not None:
return bootstrap_addr
running_gcs_addresses = find_gcs_addresses()
if len(running_gcs_addresses) == 0:
raise ConnectionError(
"Could not find any running Ray instance. "
"Please specify the one to connect to by setting the `--address` "
"flag or `RAY_ADDRESS` environment variable."
)
if len(running_gcs_addresses) > 1:
raise ConnectionError(
f"Found multiple active Ray instances: {running_gcs_addresses}. "
"Please specify the one to connect to by setting the `--address` "
"flag or `RAY_ADDRESS` environment variable."
)
return running_gcs_addresses.pop()
def extract_ip_port(bootstrap_address: str):
ip_port = parse_address(bootstrap_address)
if ip_port is None:
raise ValueError(
f"Malformed address {bootstrap_address}. " f"Expected '<host>:<port>'."
)
ip, port = ip_port
try:
port = int(port)
except ValueError:
raise ValueError(f"Malformed address port {port}. Must be an integer.")
if port < 1024 or port > 65535:
raise ValueError(
f"Invalid address port {port}. Must be between 1024 "
"and 65535 (inclusive)."
)
return ip, port
def resolve_ip_for_localhost(host: str):
"""Convert to a remotely reachable IP if the host is "localhost",
"127.0.0.1", or "::1". Otherwise do nothing.
Args:
host: The hostname or IP address.
Returns:
The same host but with the local host replaced by remotely
reachable IP.
"""
if not host:
raise ValueError(f"Malformed host: {host}")
if host == "127.0.0.1" or host == "::1" or host == "localhost":
# Make sure localhost isn't resolved to the loopback ip
return get_node_ip_address()
else:
return host
# NOTE: This API should not be used when you obtain the
# IP address when ray.init is not called because
# it cannot find the IP address if it is specified by
# ray start --node-ip-address. You should instead use
# get_cached_node_ip_address.
def get_node_ip_address(address=None):
if ray._private.worker._global_node is not None:
return ray._private.worker._global_node.node_ip_address
if not ray_constants.ENABLE_RAY_CLUSTER:
# Use loopback IP as the local IP address to prevent bothersome
# firewall popups on OSX and Windows.
# https://github.com/ray-project/ray/issues/18730.
return get_localhost_ip()
return node_ip_address_from_perspective(address)
def get_cached_node_ip_address(session_dir: str) -> str:
"""Get a node address cached on this session.
If a ray instance is started by `ray start --node-ip-address`,
the node ip address is cached to a file RAY_NODE_IP_FILENAME.
Otherwise, the file exists, but it is emptyl.
This API is process-safe, meaning the file access is protected by
a file lock.
Args:
session_dir: Path to the Ray session directory.
Returns:
node_ip_address cached on the current node. None if the node
the file doesn't exist, meaning ray instance hasn't been
started on a current node. If node_ip_address is not written
to a file, it means --node-ip-address is not given, and in this
case, we find the IP address ourselves.
"""
file_path = Path(os.path.join(session_dir, RAY_NODE_IP_FILENAME))
cached_node_ip_address = {}
with FileLock(str(file_path.absolute()) + ".lock"):
if not file_path.exists():
return None
with file_path.open() as f:
cached_node_ip_address.update(json.load(f))
if "node_ip_address" in cached_node_ip_address:
return cached_node_ip_address["node_ip_address"]
else:
return ray.util.get_node_ip_address()
def write_node_ip_address(session_dir: str, node_ip_address: Optional[str]) -> None:
    """Write a node ip address of the current session to
    RAY_NODE_IP_FILENAME.

    If a ray instance is started by `ray start --node-ip-address`,
    the node ip address is cached to a file RAY_NODE_IP_FILENAME.

    This API is process-safe, meaning the file access is protected by
    a file lock.

    The file contains a single string node_ip_address. If nothing
    is written, it means --node-ip-address was not given, and Ray
    resolves the IP address on its own. It assumes in a single node,
    you can have only 1 IP address (which is the assumption ray
    has in general).

    node_ip_address is the ip address of the current node.

    Args:
        session_dir: The path to Ray session directory.
        node_ip_address: The node IP address of the current node.
            If None, it means the node ip address is not given
            by --node-ip-address. In this case, we don't write
            anything to a file.
    """
    file_path = Path(os.path.join(session_dir, RAY_NODE_IP_FILENAME))
    cached_node_ip_address = {}
    # Lock guards against concurrent readers/writers of the cache file.
    with FileLock(str(file_path.absolute()) + ".lock"):
        # Always create the cache file (empty JSON object) so that readers
        # can distinguish "instance started" from "no instance".
        if not file_path.exists():
            with file_path.open(mode="w") as f:
                json.dump({}, f)

        with file_path.open() as f:
            cached_node_ip_address.update(json.load(f))

        cached_node_ip = cached_node_ip_address.get("node_ip_address")
        if node_ip_address is not None:
            if cached_node_ip:
                if cached_node_ip == node_ip_address:
                    # Nothing to do.
                    return
                else:
                    # A different IP was cached previously; warn and
                    # overwrite it with the new address.
                    logger.warning(
                        "The node IP address of the current host recorded "
                        f"in {RAY_NODE_IP_FILENAME} ({cached_node_ip}) "
                        "is different from the current IP address: "
                        f"{node_ip_address}. Ray will use {node_ip_address} "
                        "as the current node's IP address. "
                        "Creating 2 instances in the same host with different "
                        "IP address is not supported. "
                        "Please create an enhancement request to "
                        "https://github.com/ray-project/ray/issues."
                    )
            cached_node_ip_address["node_ip_address"] = node_ip_address
            with file_path.open(mode="w") as f:
                json.dump(cached_node_ip_address, f)
def get_node_instance_id():
    """Get the specified node instance id of the current node.

    Returns:
        The node instance id of the current node, taken from the
        RAY_CLOUD_INSTANCE_ID environment variable ("" when unset).
    """
    return os.environ.get("RAY_CLOUD_INSTANCE_ID", "")
def create_redis_client(redis_address, password=None, username=None):
    """Create a Redis client.

    Clients are cached per address on the function object; a cached client
    is re-validated with a ping (and evicted on failure) before being
    returned.

    Args:
        redis_address: The IP address and port of the Redis server.
        password: The password for Redis authentication.
        username: The username for Redis authentication.

    Returns:
        A Redis client.
    """
    import redis

    cache = getattr(create_redis_client, "instances", None)
    if cache is None:
        cache = create_redis_client.instances = {}

    num_retries = ray_constants.START_REDIS_WAIT_RETRIES
    delay = 0.001
    for attempt in range(num_retries):
        client = cache.get(redis_address)
        if client is None:
            ip, port = extract_ip_port(
                canonicalize_bootstrap_address_or_die(redis_address)
            )
            client = redis.StrictRedis(
                host=ip,
                port=int(port),
                username=username,
                password=password,
            )
            cache[redis_address] = client
        try:
            client.ping()
            return client
        except Exception as e:
            # Drop the broken client so the next attempt reconnects.
            cache.pop(redis_address)
            if attempt >= num_retries - 1:
                raise RuntimeError(
                    f"Unable to connect to Redis at {redis_address}: {e}"
                )
            # Wait a little bit, with capped exponential backoff.
            time.sleep(delay)
            delay = min(1, delay * 2)
def start_ray_process(
    command: List[str],
    process_type: str,
    fate_share: bool,
    env_updates: Optional[dict] = None,
    cwd: Optional[str] = None,
    use_valgrind: bool = False,
    use_gdb: bool = False,
    use_valgrind_profiler: bool = False,
    use_perftools_profiler: bool = False,
    use_tmux: bool = False,
    stdout_file: Optional[IO[AnyStr]] = None,
    stderr_file: Optional[IO[AnyStr]] = None,
    pipe_stdin: bool = False,
):
    """Start one of the Ray processes.

    TODO(rkn): We need to figure out how these commands interact. For example,
    it may only make sense to start a process in gdb if we also start it in
    tmux. Similarly, certain combinations probably don't make sense, like
    simultaneously running the process in valgrind and the profiler.

    Args:
        command: The command to use to start the Ray process.
        process_type: The type of the process that is being started
            (e.g., "raylet").
        fate_share: If true, the child will be killed if its parent (us) dies.
            True must only be passed after detection of this functionality.
        env_updates: A dictionary of additional environment variables to
            run the command with (in addition to the caller's environment
            variables).
        cwd: The directory to run the process in.
        use_valgrind: True if we should start the process in valgrind.
        use_gdb: True if we should start the process in gdb.
        use_valgrind_profiler: True if we should start the process in
            the valgrind profiler.
        use_perftools_profiler: True if we should profile the process
            using perftools.
        use_tmux: True if we should start the process in tmux.
        stdout_file: A file handle opened for writing to redirect stdout to. If
            no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to. If
            no redirection should happen, then this should be None.
        pipe_stdin: If true, subprocess.PIPE will be passed to the process as
            stdin.

    Returns:
        Information about the process that was started including a handle to
        the process that was started.
    """
    # Detect which flags are set through environment variables. Each tool can
    # also be turned on per process type via RAY_<TYPE>_<TOOL>=1.
    valgrind_env_var = f"RAY_{process_type.upper()}_VALGRIND"
    if os.environ.get(valgrind_env_var) == "1":
        logger.info("Detected environment variable '%s'.", valgrind_env_var)
        use_valgrind = True
    valgrind_profiler_env_var = f"RAY_{process_type.upper()}_VALGRIND_PROFILER"
    if os.environ.get(valgrind_profiler_env_var) == "1":
        logger.info("Detected environment variable '%s'.", valgrind_profiler_env_var)
        use_valgrind_profiler = True
    perftools_profiler_env_var = f"RAY_{process_type.upper()}_PERFTOOLS_PROFILER"
    if os.environ.get(perftools_profiler_env_var) == "1":
        logger.info("Detected environment variable '%s'.", perftools_profiler_env_var)
        use_perftools_profiler = True
    tmux_env_var = f"RAY_{process_type.upper()}_TMUX"
    if os.environ.get(tmux_env_var) == "1":
        logger.info("Detected environment variable '%s'.", tmux_env_var)
        use_tmux = True
    gdb_env_var = f"RAY_{process_type.upper()}_GDB"
    if os.environ.get(gdb_env_var) == "1":
        logger.info("Detected environment variable '%s'.", gdb_env_var)
        use_gdb = True

    # Jemalloc memory profiling.
    # Only compute jemalloc env vars when the caller has not already set
    # LD_PRELOAD, so a user-provided preload library is not clobbered.
    if os.environ.get("LD_PRELOAD") is None:
        jemalloc_lib_path = os.environ.get(RAY_JEMALLOC_LIB_PATH, JEMALLOC_SO)
        jemalloc_conf = os.environ.get(RAY_JEMALLOC_CONF)
        jemalloc_comps = os.environ.get(RAY_JEMALLOC_PROFILE)
        jemalloc_comps = [] if not jemalloc_comps else jemalloc_comps.split(",")
        jemalloc_env_vars = propagate_jemalloc_env_var(
            jemalloc_path=jemalloc_lib_path,
            jemalloc_conf=jemalloc_conf,
            jemalloc_comps=jemalloc_comps,
            process_type=process_type,
        )
    else:
        jemalloc_env_vars = {}
    # A MALLOC_CONF entry means jemalloc profiling applies to this process.
    use_jemalloc_mem_profiler = "MALLOC_CONF" in jemalloc_env_vars

    # The debug/profiling tools are mutually exclusive; reject combinations.
    if (
        sum(
            [
                use_gdb,
                use_valgrind,
                use_valgrind_profiler,
                use_perftools_profiler,
                use_jemalloc_mem_profiler,
            ]
        )
        > 1
    ):
        raise ValueError(
            "At most one of the 'use_gdb', 'use_valgrind', "
            "'use_valgrind_profiler', 'use_perftools_profiler', "
            "and 'use_jemalloc_mem_profiler' flags can "
            "be used at a time."
        )
    if env_updates is None:
        env_updates = {}
    if not isinstance(env_updates, dict):
        raise ValueError("The 'env_updates' argument must be a dictionary.")

    modified_env = os.environ.copy()
    modified_env.update(env_updates)

    if use_gdb:
        if not use_tmux:
            raise ValueError(
                "If 'use_gdb' is true, then 'use_tmux' must be true as well."
            )

        # Write a gdb init script that runs the target with its arguments,
        # then wrap the command so gdb loads that script.
        # TODO(suquark): Any better temp file creation here?
        gdb_init_path = os.path.join(
            ray._common.utils.get_ray_temp_dir(),
            f"gdb_init_{process_type}_{time.time()}",
        )
        ray_process_path = command[0]
        ray_process_args = command[1:]
        run_args = " ".join(["'{}'".format(arg) for arg in ray_process_args])
        with open(gdb_init_path, "w") as gdb_init_file:
            gdb_init_file.write(f"run {run_args}")
        command = ["gdb", ray_process_path, "-x", gdb_init_path]

    if use_valgrind:
        command = [
            "valgrind",
            "--track-origins=yes",
            "--leak-check=full",
            "--show-leak-kinds=all",
            "--leak-check-heuristics=stdstring",
            "--error-exitcode=1",
        ] + command

    if use_valgrind_profiler:
        command = ["valgrind", "--tool=callgrind"] + command

    if use_perftools_profiler:
        modified_env["LD_PRELOAD"] = os.environ["PERFTOOLS_PATH"]
        modified_env["CPUPROFILE"] = os.environ["PERFTOOLS_LOGFILE"]

    modified_env.update(jemalloc_env_vars)

    if use_tmux:
        # The command has to be created exactly as below to ensure that it
        # works on all versions of tmux. (Tested with tmux 1.8-5, travis'
        # version, and tmux 2.1)
        command = ["tmux", "new-session", "-d", f"{' '.join(command)}"]

    if fate_share:
        assert ray._private.utils.detect_fate_sharing_support(), (
            "kernel-level fate-sharing must only be specified if "
            "detect_fate_sharing_support() has returned True"
        )

    def preexec_fn():
        # Runs in the child between fork and exec (POSIX only). Blocks
        # SIGINT so a Ctrl-C delivered to our process group does not also
        # interrupt the child, and opts into Linux parent-death killing
        # when fate-sharing is requested.
        import signal

        signal.pthread_sigmask(signal.SIG_BLOCK, {signal.SIGINT})
        if fate_share and sys.platform.startswith("linux"):
            ray._private.utils.set_kill_on_parent_death_linux()

    win32_fate_sharing = fate_share and sys.platform == "win32"
    # With Windows fate-sharing, we need special care:
    # The process must be added to the job before it is allowed to execute.
    # Otherwise, there's a race condition: the process might spawn children
    # before the process itself is assigned to the job.
    # After that point, its children will not be added to the job anymore.
    CREATE_SUSPENDED = 0x00000004  # from Windows headers
    if sys.platform == "win32":
        # CreateProcess, which underlies Popen, is limited to
        # 32,767 characters, including the Unicode terminating null
        # character
        total_chrs = sum([len(x) for x in command])
        if total_chrs > 31766:
            raise ValueError(
                f"command is limited to a total of 31767 characters, "
                f"got {total_chrs}"
            )

    process = ConsolePopen(
        command,
        env=modified_env,
        cwd=cwd,
        stdout=stdout_file,
        stderr=stderr_file,
        stdin=subprocess.PIPE if pipe_stdin else None,
        preexec_fn=preexec_fn if sys.platform != "win32" else None,
        creationflags=CREATE_SUSPENDED if win32_fate_sharing else 0,
    )

    if win32_fate_sharing:
        # Attach the (suspended) child to the job object, then resume it.
        # On failure, kill it so it never runs outside the job.
        try:
            ray._private.utils.set_kill_child_on_death_win32(process)
            psutil.Process(process.pid).resume()
        except (psutil.Error, OSError):
            process.kill()
            raise

    def _get_stream_name(stream):
        # Best-effort human-readable name for a file-like object.
        if stream is not None:
            try:
                return stream.name
            except AttributeError:
                return str(stream)
        return None

    return ProcessInfo(
        process=process,
        stdout_file=_get_stream_name(stdout_file),
        stderr_file=_get_stream_name(stderr_file),
        use_valgrind=use_valgrind,
        use_gdb=use_gdb,
        use_valgrind_profiler=use_valgrind_profiler,
        use_perftools_profiler=use_perftools_profiler,
        use_tmux=use_tmux,
    )
def start_reaper(fate_share=None):
    """Start the reaper process.

    This is a lightweight process that simply
    waits for its parent process to die and then terminates its own
    process group. This allows us to ensure that ray processes are always
    terminated properly so long as that process itself isn't SIGKILLed.

    Returns:
        ProcessInfo for the process that was started, or None if we could
        not become a process-group leader and therefore skipped the reaper.
    """
    # Become a process-group leader so the reaper can clean up other ray
    # processes without killing the process group of whoever started us.
    try:
        if sys.platform != "win32":
            os.setpgrp()
    except OSError as e:
        if e.errno == errno.EPERM and os.getpgrp() == os.getpid():
            # Already a session leader; nothing to do.
            pass
        else:
            logger.warning(
                f"setpgrp failed, processes may not be cleaned up properly: {e}."
            )
            # Don't start the reaper in this case as it could result in
            # killing other user processes.
            return None

    reaper_script = os.path.join(RAY_PATH, RAY_PRIVATE_DIR, "ray_process_reaper.py")
    return start_ray_process(
        [sys.executable, "-u", reaper_script],
        ray_constants.PROCESS_TYPE_REAPER,
        pipe_stdin=True,
        fate_share=fate_share,
    )
def start_log_monitor(
    session_dir: str,
    logs_dir: str,
    gcs_address: str,
    fate_share: Optional[bool] = None,
    max_bytes: int = 0,
    backup_count: int = 0,
    stdout_filepath: Optional[str] = None,
    stderr_filepath: Optional[str] = None,
):
    """Start a log monitor process.

    Args:
        session_dir: The session directory.
        logs_dir: The directory of logging files.
        gcs_address: GCS address for pubsub.
        fate_share: Whether to share fate between log_monitor
            and this process.
        max_bytes: Log rotation parameter. Corresponding to
            RotatingFileHandler's maxBytes.
        backup_count: Log rotation parameter. Corresponding to
            RotatingFileHandler's backupCount.
        stdout_filepath: The file path to dump log monitor stdout.
            If None, stdout is not redirected.
        stderr_filepath: The file path to dump log monitor stderr.
            If None, stderr is not redirected.

    Returns:
        ProcessInfo for the process that was started.
    """
    monitor_script = os.path.join(RAY_PATH, RAY_PRIVATE_DIR, "log_monitor.py")
    command = [
        sys.executable,
        "-u",
        monitor_script,
        f"--session-dir={session_dir}",
        f"--logs-dir={logs_dir}",
        f"--gcs-address={gcs_address}",
        f"--logging-rotate-bytes={max_bytes}",
        f"--logging-rotate-backup-count={backup_count}",
    ]
    if stdout_filepath:
        command.append(f"--stdout-filepath={stdout_filepath}")
    if stderr_filepath:
        command.append(f"--stderr-filepath={stderr_filepath}")
    if stdout_filepath is None and stderr_filepath is None:
        # No file redirection: clear the log filename so records go to
        # stderr, formatted with the component name as a message prefix.
        command.append("--logging-filename=")
        stderr_format = ray_constants.LOGGER_FORMAT_STDERR.format(
            component=ray_constants.PROCESS_TYPE_LOG_MONITOR
        )
        command.append(f"--logging-format={stderr_format}")

    stdout_file = open(os.devnull, "w") if stdout_filepath else None
    stderr_file = open(os.devnull, "w") if stderr_filepath else None
    return start_ray_process(
        command,
        ray_constants.PROCESS_TYPE_LOG_MONITOR,
        stdout_file=stdout_file,
        stderr_file=stderr_file,
        fate_share=fate_share,
    )
def start_api_server(
    include_dashboard: Optional[bool],
    raise_on_failure: bool,
    host: str,
    gcs_address: str,
    cluster_id_hex: str,
    node_ip_address: str,
    temp_dir: str,
    logdir: str,
    session_dir: str,
    port: Optional[int] = None,
    fate_share: Optional[bool] = None,
    max_bytes: int = 0,
    backup_count: int = 0,
    stdout_filepath: Optional[str] = None,
    stderr_filepath: Optional[str] = None,
):
    """Start an API server process.

    Args:
        include_dashboard: If true, this will load all dashboard-related modules
            when starting the API server, or fail. If None, it will load all
            dashboard-related modules conditioned on dependencies being present.
            Otherwise, it will only start the modules that are not relevant to
            the dashboard.
        raise_on_failure: If true, this will raise an exception
            if we fail to start the API server. Otherwise it will print
            a warning if we fail to start the API server.
        host: The host to bind the dashboard web server to.
        gcs_address: The gcs address the dashboard should connect to
        cluster_id_hex: Cluster ID in hex.
        node_ip_address: The IP address where this is running.
        temp_dir: The temporary directory used for log files and
            information for this Ray session.
        session_dir: The session directory under temp_dir.
            It is used as a identifier of individual cluster.
        logdir: The log directory used to generate dashboard log.
        port: The port to bind the dashboard web server to.
            Defaults to 8265.
        max_bytes: Log rotation parameter. Corresponding to
            RotatingFileHandler's maxBytes.
        backup_count: Log rotation parameter. Corresponding to
            RotatingFileHandler's backupCount.
        stdout_filepath: The file path to dump dashboard stdout.
            If None, stdout is not redirected.
        stderr_filepath: The file path to dump dashboard stderr.
            If None, stderr is not redirected.

    Returns:
        A tuple of :
            - Dashboard URL if dashboard enabled and started.
            - ProcessInfo for the process that was started.
    """
    try:
        # Make sure port is available.
        if port is None:
            port_retries = 50
            port = ray_constants.DEFAULT_DASHBOARD_PORT
        else:
            # A port was explicitly requested; verify it is free up front
            # since there will be no retries.
            port_retries = 0
            port_test_socket = socket.socket(
                socket.AF_INET6 if is_ipv6(host) else socket.AF_INET,
                socket.SOCK_STREAM,
            )
            port_test_socket.setsockopt(
                socket.SOL_SOCKET,
                socket.SO_REUSEADDR,
                1,
            )
            try:
                port_test_socket.bind((host, port))
                port_test_socket.close()
            except socket.error as e:
                # 10013 on windows is a bit more broad than just
                # "address in use": it can also indicate "permission denied".
                # TODO: improve the error message?
                if e.errno in {48, 98, 10013}:  # address already in use.
                    raise ValueError(
                        f"Failed to bind to {host}:{port} because it's "
                        "already occupied. You can use `ray start "
                        "--dashboard-port ...` or `ray.init(dashboard_port=..."
                        ")` to select a different port."
                    )
                else:
                    raise e

        # Make sure the process can start.
        dashboard_dependency_error = ray._private.utils.get_dashboard_dependency_error()
        # Explicitly check here that when the user explicitly specifies
        # dashboard inclusion, the install is not minimal.
        if include_dashboard and dashboard_dependency_error:
            logger.error(
                f"Ray dashboard dependencies failed to install properly: {dashboard_dependency_error}.\n"
                "Potential causes include:\n"
                "1. --include-dashboard is not supported when minimal ray is used. "
                "Download ray[default] to use the dashboard.\n"
                "2. Dashboard dependencies are conflicting with your python environment. "
                "Investigate your python environment and try reinstalling ray[default].\n"
            )
            raise Exception("Cannot include dashboard with missing packages.")

        include_dash: bool = True if include_dashboard is None else include_dashboard

        # Start the dashboard process.
        dashboard_dir = "dashboard"
        dashboard_filepath = os.path.join(RAY_PATH, dashboard_dir, "dashboard.py")

        command = [
            *_build_python_executable_command_memory_profileable(
                ray_constants.PROCESS_TYPE_DASHBOARD,
                session_dir,
                unbuffered=False,
            ),
            dashboard_filepath,
            f"--host={host}",
            f"--port={port}",
            f"--port-retries={port_retries}",
            f"--temp-dir={temp_dir}",
            f"--log-dir={logdir}",
            f"--session-dir={session_dir}",
            f"--logging-rotate-bytes={max_bytes}",
            f"--logging-rotate-backup-count={backup_count}",
            f"--gcs-address={gcs_address}",
            f"--cluster-id-hex={cluster_id_hex}",
            f"--node-ip-address={node_ip_address}",
        ]
        if stdout_filepath:
            command.append(f"--stdout-filepath={stdout_filepath}")
        if stderr_filepath:
            command.append(f"--stderr-filepath={stderr_filepath}")
        if stdout_filepath is None and stderr_filepath is None:
            # If not redirecting logging to files, unset log filename.
            # This will cause log records to go to stderr.
            command.append("--logging-filename=")
            # Use stderr log format with the component name as a message prefix.
            logging_format = ray_constants.LOGGER_FORMAT_STDERR.format(
                component=ray_constants.PROCESS_TYPE_DASHBOARD
            )
            command.append(f"--logging-format={logging_format}")
        if dashboard_dependency_error is not None:
            command.append("--minimal")

        if not include_dash:
            # If dashboard is not included, load modules
            # that are irrelevant to the dashboard.
            # TODO(sang): Modules like job or state APIs should be
            # loaded although dashboard is disabled. Fix it.
            command.append("--modules-to-load=UsageStatsHead")
            command.append("--disable-frontend")

        stdout_file = None
        if stdout_filepath:
            stdout_file = open(os.devnull, "w")
        stderr_file = None
        if stderr_filepath:
            stderr_file = open(os.devnull, "w")

        process_info = start_ray_process(
            command,
            ray_constants.PROCESS_TYPE_DASHBOARD,
            stdout_file=stdout_file,
            stderr_file=stderr_file,
            fate_share=fate_share,
        )

        # Retrieve the dashboard url
        gcs_client = GcsClient(address=gcs_address, cluster_id=cluster_id_hex)
        ray.experimental.internal_kv._initialize_internal_kv(gcs_client)
        dashboard_url = None
        dashboard_returncode = None
        start_time_s = time.time()
        while (
            time.time() - start_time_s < ray_constants.RAY_DASHBOARD_STARTUP_TIMEOUT_S
        ):
            dashboard_url = ray.experimental.internal_kv._internal_kv_get(
                ray_constants.DASHBOARD_ADDRESS,
                namespace=ray_constants.KV_NAMESPACE_DASHBOARD,
            )
            if dashboard_url is not None:
                dashboard_url = dashboard_url.decode("utf-8")
                break
            dashboard_returncode = process_info.process.poll()
            if dashboard_returncode is not None:
                break
            # This is often on the critical path of ray.init() and ray start,
            # so we need to poll often.
            time.sleep(0.1)

        # Dashboard couldn't be started.
        if dashboard_url is None:
            returncode_str = (
                f", return code {dashboard_returncode}"
                if dashboard_returncode is not None
                else ""
            )
            logger.error(f"Failed to start the dashboard {returncode_str}")

            def read_log(filename, lines_to_read):
                """Return the last `lines_to_read` lines of a log file
                (newest first), followed by a header line."""
                dashboard_log = os.path.join(logdir, filename)
                # Read the tail via mmap; the log file may be large.
                lines = []
                with open(dashboard_log, "rb") as f:
                    with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mm:
                        end = mm.size()
                        for _ in range(lines_to_read):
                            sep = mm.rfind(b"\n", 0, end - 1)
                            if sep == -1:
                                break
                            lines.append(mm[sep + 1 : end].decode("utf-8"))
                            end = sep
                lines.append(
                    f"The last {lines_to_read} lines of {dashboard_log} "
                    "(it contains the error message from the dashboard): "
                )
                return lines

            if logdir:
                lines_to_read = 20
                logger.error(
                    "Error should be written to 'dashboard.log' or "
                    "'dashboard.err'. We are printing the last "
                    f"{lines_to_read} lines for you. See "
                    "'https://docs.ray.io/en/master/ray-observability/user-guides/configure-logging.html#logging-directory-structure' "  # noqa
                    "to find where the log file is."
                )
                try:
                    lines = read_log("dashboard.log", lines_to_read=lines_to_read)
                except Exception as e:
                    logger.error(
                        f"Couldn't read dashboard.log file. Error: {e}. "
                        "It means the dashboard is broken even before it "
                        "initializes the logger (mostly dependency issues). "
                        "Reading the dashboard.err file which contains stdout/stderr."
                    )
                    # If we cannot read the .log file, we fallback to .err file.
                    # This is the case where dashboard couldn't be started at all
                    # and couldn't even initialize the logger to write logs to .log
                    # file.
                    try:
                        lines = read_log("dashboard.err", lines_to_read=lines_to_read)
                    except Exception as e:
                        raise Exception(
                            f"Failed to read dashboard.err file: {e}. "
                            "It is unexpected. Please report an issue to "
                            "Ray github. "
                            "https://github.com/ray-project/ray/issues"
                        )
                last_log_str = "\n" + "\n".join(reversed(lines[-lines_to_read:]))
                raise Exception(last_log_str)
            else:
                # Is it reachable?
                raise Exception("Failed to start a dashboard.")

        if dashboard_dependency_error is not None or not include_dash:
            # If it is the minimal installation, the web url (dashboard url)
            # shouldn't be configured because it doesn't start a server.
            dashboard_url = ""

        return dashboard_url, process_info
    except Exception as e:
        if raise_on_failure:
            # Re-raise unchanged (a bare `raise` preserves the original
            # traceback; the previous `raise e from e` set the exception
            # as its own cause).
            raise
        else:
            logger.error(e)
            return None, None
def get_address(redis_address):
    """Parse a Redis address of the form ip:port, redis://ip:port, or
    rediss://ip:port (SSL).

    Returns:
        A tuple (ip, port, enable_ssl).
    """
    scheme, sep, rest = redis_address.partition("://")
    if not sep:
        # Bare ip:port form, no scheme and no SSL.
        ip, port = parse_address(redis_address)
        return ip, port, False
    if scheme not in ("redis", "rediss"):
        raise ValueError(
            f"Invalid redis address {redis_address}."
            "Expected format is ip:port or redis://ip:port, "
            "or rediss://ip:port for SSL."
        )
    ip, port = parse_address(rest)
    # The rediss scheme enables SSL.
    return ip, port, scheme == "rediss"
def start_gcs_server(
    redis_address: str,
    log_dir: str,
    stdout_filepath: Optional[str],
    stderr_filepath: Optional[str],
    session_name: str,
    redis_username: Optional[str] = None,
    redis_password: Optional[str] = None,
    config: Optional[dict] = None,
    fate_share: Optional[bool] = None,
    gcs_server_port: Optional[int] = None,
    metrics_agent_port: Optional[int] = None,
    node_ip_address: Optional[str] = None,
):
    """Start a gcs server.

    Args:
        redis_address: The address that the Redis server is listening on.
        log_dir: The path of the dir where gcs log files are created.
        stdout_filepath: The file path to dump gcs server stdout.
            If None, stdout is not redirected.
        stderr_filepath: The file path to dump gcs server stderr.
            If None, stderr is not redirected.
        session_name: The current Ray session name.
        redis_username: The username of the Redis server.
        redis_password: The password of the Redis server.
        config: Optional configuration that will
            override defaults in RayConfig.
        gcs_server_port: Port number of the gcs server.
        metrics_agent_port: The port where metrics agent is bound to.
        node_ip_address: IP Address of a node where gcs server starts.

    Returns:
        ProcessInfo for the process that was started.

    Raises:
        ValueError: If `gcs_server_port` is missing or not positive.
    """
    # Validate explicitly rather than with `assert`, which is stripped
    # under `python -O` (and would raise TypeError on the default None).
    if gcs_server_port is None or gcs_server_port <= 0:
        raise ValueError(
            f"gcs_server_port must be a positive integer, got {gcs_server_port}"
        )
    command = [
        GCS_SERVER_EXECUTABLE,
        f"--log_dir={log_dir}",
        f"--config_list={serialize_config(config)}",
        f"--gcs_server_port={gcs_server_port}",
        f"--metrics-agent-port={metrics_agent_port}",
        f"--node-ip-address={node_ip_address}",
        f"--session-name={session_name}",
        f"--ray-commit={ray.__commit__}",
    ]
    if stdout_filepath:
        command += [f"--stdout_filepath={stdout_filepath}"]
    if stderr_filepath:
        command += [f"--stderr_filepath={stderr_filepath}"]
    if redis_address:
        redis_ip_address, redis_port, enable_redis_ssl = get_address(redis_address)
        command += [
            f"--redis_address={redis_ip_address}",
            f"--redis_port={redis_port}",
            f"--redis_enable_ssl={'true' if enable_redis_ssl else 'false'}",
        ]
    if redis_username:
        command += [f"--redis_username={redis_username}"]
    if redis_password:
        command += [f"--redis_password={redis_password}"]
    stdout_file = None
    if stdout_filepath:
        stdout_file = open(os.devnull, "w")
    stderr_file = None
    if stderr_filepath:
        stderr_file = open(os.devnull, "w")
    process_info = start_ray_process(
        command,
        ray_constants.PROCESS_TYPE_GCS_SERVER,
        stdout_file=stdout_file,
        stderr_file=stderr_file,
        fate_share=fate_share,
    )
    return process_info
def start_raylet(
    redis_address: str,
    gcs_address: str,
    node_id: str,
    node_ip_address: str,
    node_manager_port: int,
    raylet_name: str,
    plasma_store_name: str,
    cluster_id: str,
    worker_path: str,
    setup_worker_path: str,
    temp_dir: str,
    session_dir: str,
    resource_dir: str,
    log_dir: str,
    resource_and_label_spec,
    plasma_directory: str,
    fallback_directory: str,
    object_store_memory: int,
    session_name: str,
    is_head_node: bool,
    resource_isolation_config: ResourceIsolationConfig,
    min_worker_port: Optional[int] = None,
    max_worker_port: Optional[int] = None,
    worker_port_list: Optional[List[int]] = None,
    object_manager_port: Optional[int] = None,
    redis_username: Optional[str] = None,
    redis_password: Optional[str] = None,
    metrics_agent_port: Optional[int] = None,
    metrics_export_port: Optional[int] = None,
    dashboard_agent_listen_port: Optional[int] = None,
    runtime_env_agent_port: Optional[int] = None,
    use_valgrind: bool = False,
    use_profiler: bool = False,
    raylet_stdout_filepath: Optional[str] = None,
    raylet_stderr_filepath: Optional[str] = None,
    dashboard_agent_stdout_filepath: Optional[str] = None,
    dashboard_agent_stderr_filepath: Optional[str] = None,
    runtime_env_agent_stdout_filepath: Optional[str] = None,
    runtime_env_agent_stderr_filepath: Optional[str] = None,
    huge_pages: bool = False,
    fate_share: Optional[bool] = None,
    # NOTE(review): annotated Optional[int], but ``.close()`` is called on it
    # below — presumably an already-bound socket object; confirm with callers.
    socket_to_use: Optional[int] = None,
    max_bytes: int = 0,
    backup_count: int = 0,
    ray_debugger_external: bool = False,
    env_updates: Optional[dict] = None,
    node_name: Optional[str] = None,
    webui: Optional[str] = None,
):
    """Start a raylet, which is a combined local scheduler and object manager.
    Args:
        redis_address: The address of the primary Redis server.
        gcs_address: The address of GCS server.
        node_id: The hex ID of this node.
        node_ip_address: The IP address of this node.
        node_manager_port: The port to use for the node manager. If it's
            0, a random port will be used.
        raylet_name: The name of the raylet socket to create.
        plasma_store_name: The name of the plasma store socket to connect
             to.
        worker_path: The path of the Python file that new worker
            processes will execute.
        setup_worker_path: The path of the Python file that will set up
            the environment for the worker process.
        temp_dir: The path of the temporary directory Ray will use.
        session_dir: The path of this session.
        resource_dir: The path of resource of this session .
        log_dir: The path of the dir where log files are created.
        resource_and_label_spec: Resources and key-value labels for this raylet.
        plasma_directory: A directory where the Plasma memory mapped files will
            be created.
        fallback_directory: A directory where the Object store fallback files will be created.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with.
        session_name: The current Ray session name.
        resource_isolation_config: Resource isolation configuration for reserving
            memory and cpu resources for ray system processes through cgroupv2
        is_head_node: whether this node is the head node.
        min_worker_port: The lowest port number that workers will bind
            on. If not set, random ports will be chosen.
        max_worker_port: The highest port number that workers will bind
            on. If set, min_worker_port must also be set.
        worker_port_list: An explicit list of ports to be used for
            workers (comma-separated). Overrides min_worker_port and
            max_worker_port.
        object_manager_port: The port to use for the object manager. If this is
            None, then the object manager will choose its own port.
        redis_username: The username to use when connecting to Redis.
        redis_password: The password to use when connecting to Redis.
        metrics_agent_port: The port where metrics agent is bound to.
        metrics_export_port: The port at which metrics are exposed to.
        dashboard_agent_listen_port: The port at which the dashboard agent
            listens to for HTTP.
        runtime_env_agent_port: The port at which the runtime env agent
            listens to for HTTP.
        use_valgrind: True if the raylet should be started inside
            of valgrind. If this is True, use_profiler must be False.
        use_profiler: True if the raylet should be started inside
            a profiler. If this is True, use_valgrind must be False.
        raylet_stdout_filepath: The file path to dump raylet stdout.
            If None, stdout is not redirected.
        raylet_stderr_filepath: The file path to dump raylet stderr.
            If None, stderr is not redirected.
        dashboard_agent_stdout_filepath: The file path to dump
            dashboard agent stdout. If None, stdout is not redirected.
        dashboard_agent_stderr_filepath: The file path to dump
            dashboard agent stderr. If None, stderr is not redirected.
        runtime_env_agent_stdout_filepath: The file path to dump
            runtime env agent stdout. If None, stdout is not redirected.
        runtime_env_agent_stderr_filepath: The file path to dump
            runtime env agent stderr. If None, stderr is not redirected.
        huge_pages: Boolean flag indicating whether to start the Object
            Store with hugetlbfs support. Requires plasma_directory.
        fate_share: Whether to share fate between raylet and this process.
        max_bytes: Log rotation parameter. Corresponding to
            RotatingFileHandler's maxBytes.
        backup_count: Log rotation parameter. Corresponding to
            RotatingFileHandler's backupCount.
        ray_debugger_external: True if the Ray debugger should be made
            available externally to this node.
        env_updates: Environment variable overrides.
        node_name: The name of the node.
        webui: The url of the UI.
    Returns:
        ProcessInfo for the process that was started.
    """
    assert node_manager_port is not None and type(node_manager_port) is int
    if use_valgrind and use_profiler:
        raise ValueError("Cannot use valgrind and profiler at the same time.")
    # Get the static resources and labels from the resolved ResourceAndLabelSpec
    static_resources = resource_and_label_spec.to_resource_dict()
    labels = resource_and_label_spec.labels
    # Limit the number of workers that can be started in parallel by the
    # raylet. However, make sure it is at least 1.
    num_cpus_static = static_resources.get("CPU", 0)
    maximum_startup_concurrency = max(
        1, min(multiprocessing.cpu_count(), num_cpus_static)
    )
    # Format the resource argument in a form like 'CPU,1.0,GPU,0,Custom,3'.
    resource_argument = ",".join(
        ["{},{}".format(*kv) for kv in static_resources.items()]
    )
    # Java worker support is enabled only when both a `java` executable is on
    # PATH and the ray jars are packaged into this installation.
    has_java_command = False
    if shutil.which("java") is not None:
        has_java_command = True
    ray_java_installed = False
    try:
        jars_dir = get_ray_jars_dir()
        if os.path.exists(jars_dir):
            ray_java_installed = True
    except Exception:
        pass
    include_java = has_java_command and ray_java_installed
    if include_java is True:
        java_worker_command = build_java_worker_command(
            gcs_address,
            plasma_store_name,
            raylet_name,
            redis_username,
            redis_password,
            session_dir,
            node_ip_address,
            setup_worker_path,
        )
    else:
        java_worker_command = []
    # C++ worker support is enabled only when the default worker executable
    # is packaged into this installation.
    if os.path.exists(DEFAULT_WORKER_EXECUTABLE):
        cpp_worker_command = build_cpp_worker_command(
            gcs_address,
            plasma_store_name,
            raylet_name,
            redis_username,
            redis_password,
            session_dir,
            log_dir,
            node_ip_address,
            setup_worker_path,
        )
    else:
        cpp_worker_command = []
    # Create the command that the Raylet will use to start workers.
    # TODO(architkulkarni): Pipe in setup worker args separately instead of
    # inserting them into start_worker_command and later erasing them if
    # needed.
    start_worker_command = (
        [
            sys.executable,
            setup_worker_path,
        ]
        + _site_flags()  # Inherit "-S" and "-s" flags from current Python interpreter.
        + [
            worker_path,
            f"--node-ip-address={node_ip_address}",
            "--node-manager-port=RAY_NODE_MANAGER_PORT_PLACEHOLDER",
            f"--object-store-name={plasma_store_name}",
            f"--raylet-name={raylet_name}",
            f"--redis-address={redis_address}",
            f"--metrics-agent-port={metrics_agent_port}",
            f"--logging-rotate-bytes={max_bytes}",
            f"--logging-rotate-backup-count={backup_count}",
            f"--runtime-env-agent-port={runtime_env_agent_port}",
            f"--gcs-address={gcs_address}",
            f"--session-name={session_name}",
            f"--temp-dir={temp_dir}",
            f"--webui={webui}",
            f"--cluster-id={cluster_id}",
        ]
    )
    # Placeholder presumably substituted with per-worker dynamic options by the
    # raylet when it actually spawns a worker — confirm in the raylet sources.
    start_worker_command.append("RAY_WORKER_DYNAMIC_OPTION_PLACEHOLDER")
    if redis_username:
        start_worker_command += [f"--redis-username={redis_username}"]
    if redis_password:
        start_worker_command += [f"--redis-password={redis_password}"]
    # If the object manager port is None, then use 0 to cause the object
    # manager to choose its own port.
    if object_manager_port is None:
        object_manager_port = 0
    if min_worker_port is None:
        min_worker_port = 0
    if max_worker_port is None:
        max_worker_port = 0
    labels_json_str = ""
    if labels:
        labels_json_str = json.dumps(labels)
    dashboard_agent_command = [
        *_build_python_executable_command_memory_profileable(
            ray_constants.PROCESS_TYPE_DASHBOARD_AGENT, session_dir
        ),
        os.path.join(RAY_PATH, "dashboard", "agent.py"),
        f"--node-ip-address={node_ip_address}",
        f"--metrics-export-port={metrics_export_port}",
        f"--grpc-port={metrics_agent_port}",
        f"--listen-port={dashboard_agent_listen_port}",
        "--node-manager-port=RAY_NODE_MANAGER_PORT_PLACEHOLDER",
        f"--object-store-name={plasma_store_name}",
        f"--raylet-name={raylet_name}",
        f"--temp-dir={temp_dir}",
        f"--session-dir={session_dir}",
        f"--log-dir={log_dir}",
        f"--logging-rotate-bytes={max_bytes}",
        f"--logging-rotate-backup-count={backup_count}",
        f"--session-name={session_name}",
        f"--gcs-address={gcs_address}",
        f"--cluster-id-hex={cluster_id}",
    ]
    if dashboard_agent_stdout_filepath:
        dashboard_agent_command.append(
            f"--stdout-filepath={dashboard_agent_stdout_filepath}"
        )
    if dashboard_agent_stderr_filepath:
        dashboard_agent_command.append(
            f"--stderr-filepath={dashboard_agent_stderr_filepath}"
        )
    if (
        dashboard_agent_stdout_filepath is None
        and dashboard_agent_stderr_filepath is None
    ):
        # If not redirecting logging to files, unset log filename.
        # This will cause log records to go to stderr.
        dashboard_agent_command.append("--logging-filename=")
        # Use stderr log format with the component name as a message prefix.
        logging_format = ray_constants.LOGGER_FORMAT_STDERR.format(
            component=ray_constants.PROCESS_TYPE_DASHBOARD_AGENT
        )
        dashboard_agent_command.append(f"--logging-format={logging_format}")
    if ray._private.utils.get_dashboard_dependency_error() is not None:
        # If dependencies are not installed, it is the minimally packaged
        # ray. We should restrict the features within dashboard agent
        # that requires additional dependencies to be downloaded.
        dashboard_agent_command.append("--minimal")
    runtime_env_agent_command = [
        *_build_python_executable_command_memory_profileable(
            ray_constants.PROCESS_TYPE_RUNTIME_ENV_AGENT, session_dir
        ),
        os.path.join(RAY_PATH, "_private", "runtime_env", "agent", "main.py"),
        f"--node-ip-address={node_ip_address}",
        f"--runtime-env-agent-port={runtime_env_agent_port}",
        f"--gcs-address={gcs_address}",
        f"--cluster-id-hex={cluster_id}",
        f"--runtime-env-dir={resource_dir}",
        f"--logging-rotate-bytes={max_bytes}",
        f"--logging-rotate-backup-count={backup_count}",
        f"--log-dir={log_dir}",
        f"--temp-dir={temp_dir}",
    ]
    if runtime_env_agent_stdout_filepath:
        runtime_env_agent_command.append(
            f"--stdout-filepath={runtime_env_agent_stdout_filepath}"
        )
    if runtime_env_agent_stderr_filepath:
        runtime_env_agent_command.append(
            f"--stderr-filepath={runtime_env_agent_stderr_filepath}"
        )
    if (
        runtime_env_agent_stdout_filepath is None
        and runtime_env_agent_stderr_filepath is None
    ):
        # If not redirecting logging to files, unset log filename.
        # This will cause log records to go to stderr.
        runtime_env_agent_command.append("--logging-filename=")
        # Use stderr log format with the component name as a message prefix.
        logging_format = ray_constants.LOGGER_FORMAT_STDERR.format(
            component=ray_constants.PROCESS_TYPE_RUNTIME_ENV_AGENT
        )
        runtime_env_agent_command.append(f"--logging-format={logging_format}")
    command = [
        RAYLET_EXECUTABLE,
        f"--raylet_socket_name={raylet_name}",
        f"--store_socket_name={plasma_store_name}",
        f"--object_manager_port={object_manager_port}",
        f"--min_worker_port={min_worker_port}",
        f"--max_worker_port={max_worker_port}",
        f"--node_manager_port={node_manager_port}",
        f"--node_id={node_id}",
        f"--node_ip_address={node_ip_address}",
        f"--maximum_startup_concurrency={maximum_startup_concurrency}",
        f"--static_resource_list={resource_argument}",
        f"--python_worker_command={subprocess.list2cmdline(start_worker_command)}",  # noqa
        f"--java_worker_command={subprocess.list2cmdline(java_worker_command)}",  # noqa
        f"--cpp_worker_command={subprocess.list2cmdline(cpp_worker_command)}",  # noqa
        f"--native_library_path={DEFAULT_NATIVE_LIBRARY_PATH}",
        f"--temp_dir={temp_dir}",
        f"--session_dir={session_dir}",
        f"--log_dir={log_dir}",
        f"--resource_dir={resource_dir}",
        f"--metrics-agent-port={metrics_agent_port}",
        f"--metrics_export_port={metrics_export_port}",
        f"--runtime_env_agent_port={runtime_env_agent_port}",
        f"--object_store_memory={object_store_memory}",
        f"--plasma_directory={plasma_directory}",
        f"--fallback_directory={fallback_directory}",
        f"--ray-debugger-external={1 if ray_debugger_external else 0}",
        f"--gcs-address={gcs_address}",
        f"--session-name={session_name}",
        f"--labels={labels_json_str}",
        f"--cluster-id={cluster_id}",
    ]
    if resource_isolation_config.is_enabled():
        # NOTE(review): uses the root logger (logging.info) rather than the
        # module-level `logger` used elsewhere in this file — confirm intent.
        logging.info(
            f"Resource isolation enabled with cgroup_path={resource_isolation_config.cgroup_path}, "
            f"system_reserved_cpu={resource_isolation_config.system_reserved_cpu_weight} "
            f"system_reserved_memory={resource_isolation_config.system_reserved_memory}."
        )
        command.append("--enable-resource-isolation")
        command.append(f"--cgroup-path={resource_isolation_config.cgroup_path}")
        command.append(
            f"--system-reserved-cpu-weight={resource_isolation_config.system_reserved_cpu_weight}"
        )
        command.append(
            f"--system-reserved-memory-bytes={resource_isolation_config.system_reserved_memory}"
        )
        command.append(f"--system-pids={resource_isolation_config.system_pids}")
    if raylet_stdout_filepath:
        command.append(f"--stdout_filepath={raylet_stdout_filepath}")
    if raylet_stderr_filepath:
        command.append(f"--stderr_filepath={raylet_stderr_filepath}")
    if is_head_node:
        command.append("--head")
    if worker_port_list is not None:
        command.append(f"--worker_port_list={worker_port_list}")
    command.append(
        "--num_prestart_python_workers={}".format(int(resource_and_label_spec.num_cpus))
    )
    command.append(
        "--dashboard_agent_command={}".format(
            subprocess.list2cmdline(dashboard_agent_command)
        )
    )
    command.append(
        "--runtime_env_agent_command={}".format(
            subprocess.list2cmdline(runtime_env_agent_command)
        )
    )
    if huge_pages:
        command.append("--huge_pages")
    if socket_to_use:
        socket_to_use.close()
    if node_name is not None:
        command.append(
            f"--node-name={node_name}",
        )
    # When file redirection is requested, the raylet writes its own log files
    # via the --stdout_filepath/--stderr_filepath flags above; the parent hands
    # the child /dev/null so output is not duplicated through the pipe.
    stdout_file = None
    if raylet_stdout_filepath:
        stdout_file = open(os.devnull, "w")
    stderr_file = None
    if raylet_stderr_filepath:
        stderr_file = open(os.devnull, "w")
    process_info = start_ray_process(
        command,
        ray_constants.PROCESS_TYPE_RAYLET,
        use_valgrind=use_valgrind,
        use_gdb=False,
        use_valgrind_profiler=use_profiler,
        use_perftools_profiler=("RAYLET_PERFTOOLS_PATH" in os.environ),
        stdout_file=stdout_file,
        stderr_file=stderr_file,
        fate_share=fate_share,
        env_updates=env_updates,
    )
    return process_info
def get_ray_jars_dir():
    """Return a directory where all ray-related jars and
    their dependencies locate.

    Returns:
        The absolute path of the packaged ``jars`` directory.

    Raises:
        RuntimeError: If the jars directory was not packaged into this
            installation (i.e. ray was built without Java support).
    """
    jars_dir = os.path.abspath(os.path.join(RAY_PATH, "jars"))
    if not os.path.exists(jars_dir):
        raise RuntimeError(
            "Ray jars is not packaged into ray. "
            "Please build ray with java enabled "
            "(set env var RAY_INSTALL_JAVA=1)"
        )
    # Reuse the path computed above; the original re-derived
    # os.path.abspath(os.path.join(...)) a second time for the return value.
    return jars_dir
def build_java_worker_command(
    bootstrap_address: str,
    plasma_store_name: str,
    raylet_name: str,
    redis_username: str,
    redis_password: str,
    session_dir: str,
    node_ip_address: str,
    setup_worker_path: str,
):
    """This method assembles the command used to start a Java worker.
    Args:
        bootstrap_address: Bootstrap address of ray cluster.
        plasma_store_name: The name of the plasma store socket to connect
            to.
        raylet_name: The name of the raylet socket to create.
        redis_username: The username to connect to Redis.
        redis_password: The password to connect to Redis.
        session_dir: The path of this session.
        node_ip_address: The IP address for this node.
        setup_worker_path: The path of the Python file that will set up
            the environment for the worker process.
    Returns:
        The command string for starting Java worker.
    """
    props = []

    def _define(key, value):
        # Java system properties are passed as -Dkey=value arguments.
        props.append(f"-D{key}={value}")

    if bootstrap_address is not None:
        _define("ray.address", bootstrap_address)
    _define("ray.raylet.node-manager-port", "RAY_NODE_MANAGER_PORT_PLACEHOLDER")
    if plasma_store_name is not None:
        _define("ray.object-store.socket-name", plasma_store_name)
    if raylet_name is not None:
        _define("ray.raylet.socket-name", raylet_name)
    if redis_username is not None:
        _define("ray.redis.username", redis_username)
    if redis_password is not None:
        _define("ray.redis.password", redis_password)
    if node_ip_address is not None:
        _define("ray.node-ip", node_ip_address)
    _define("ray.home", RAY_HOME)
    _define("ray.logging.dir", os.path.join(session_dir, "logs"))
    _define("ray.session-dir", session_dir)
    # The setup-worker script launches the JVM; dynamic per-worker options are
    # substituted for the placeholder before the main class name.
    return [
        sys.executable,
        setup_worker_path,
        *props,
        "RAY_WORKER_DYNAMIC_OPTION_PLACEHOLDER",
        "io.ray.runtime.runner.worker.DefaultWorker",
    ]
def build_cpp_worker_command(
    bootstrap_address: str,
    plasma_store_name: str,
    raylet_name: str,
    redis_username: str,
    redis_password: str,
    session_dir: str,
    log_dir: str,
    node_ip_address: str,
    setup_worker_path: str,
):
    """This method assembles the command used to start a CPP worker.
    Args:
        bootstrap_address: The bootstrap address of the cluster.
        plasma_store_name: The name of the plasma store socket to connect
            to.
        raylet_name: The name of the raylet socket to create.
        redis_username: The username to connect to Redis.
        redis_password: The password to connect to Redis.
        session_dir: The path of this session.
        log_dir: The path of logs.
        node_ip_address: The ip address for this node.
        setup_worker_path: The path of the Python file that will set up
            the environment for the worker process.
    Returns:
        The command string for starting CPP worker.
    """
    # Flags consumed by the C++ worker executable itself; the trailing
    # placeholder is substituted with per-worker dynamic options.
    worker_flags = [
        f"--ray_plasma_store_socket_name={plasma_store_name}",
        f"--ray_raylet_socket_name={raylet_name}",
        "--ray_node_manager_port=RAY_NODE_MANAGER_PORT_PLACEHOLDER",
        f"--ray_address={bootstrap_address}",
        f"--ray_redis_username={redis_username}",
        f"--ray_redis_password={redis_password}",
        f"--ray_session_dir={session_dir}",
        f"--ray_logs_dir={log_dir}",
        f"--ray_node_ip_address={node_ip_address}",
        "RAY_WORKER_DYNAMIC_OPTION_PLACEHOLDER",
    ]
    return [sys.executable, setup_worker_path, DEFAULT_WORKER_EXECUTABLE, *worker_flags]
def determine_plasma_store_config(
    object_store_memory: int,
    temp_dir: str,
    plasma_directory: Optional[str] = None,
    fallback_directory: Optional[str] = None,
    huge_pages: bool = False,
):
    """Figure out how to configure the plasma object store.
    This will determine:
    1. which directory to use for the plasma store. On Linux,
    we will try to use /dev/shm unless the shared memory file system is too
    small, in which case we will fall back to /tmp. If any of the object store
    memory or plasma directory parameters are specified by the user, then those
    values will be preserved.
    2. which directory to use for the fallback files. It will default to the temp_dir
    if it is not extracted from the object_spilling_config.
    Args:
        object_store_memory: The object store memory to use.
        plasma_directory: The user-specified plasma directory parameter.
        fallback_directory: The path extracted from the object_spilling_config when the
                            object spilling config is set and the spilling type is to
                            filesystem.
        huge_pages: The user-specified huge pages parameter.
    Returns:
        A tuple of plasma directory to use, the fallback directory to use, and the
        object store memory to use. If it is specified by the user, then that value will
        be preserved.
    """
    # Coerce float/str inputs (e.g. from CLI parsing) to an int byte count.
    if not isinstance(object_store_memory, int):
        object_store_memory = int(object_store_memory)
    if huge_pages and not (sys.platform == "linux" or sys.platform == "linux2"):
        raise ValueError("The huge_pages argument is only supported on Linux.")
    system_memory = ray._common.utils.get_system_memory()
    # Determine which directory to use. By default, use /tmp on MacOS and
    # /dev/shm on Linux, unless the shared-memory file system is too small,
    # in which case we default to /tmp on Linux.
    if plasma_directory is None:
        if sys.platform == "linux" or sys.platform == "linux2":
            shm_avail = ray._private.utils.get_shared_memory_bytes()
            # Compare the requested memory size to the memory available in
            # /dev/shm.
            if shm_avail >= object_store_memory:
                plasma_directory = "/dev/shm"
            elif (
                not os.environ.get("RAY_OBJECT_STORE_ALLOW_SLOW_STORAGE")
                and object_store_memory > ray_constants.REQUIRE_SHM_SIZE_THRESHOLD
            ):
                raise ValueError(
                    "The configured object store size ({} GB) exceeds "
                    "/dev/shm size ({} GB). This will harm performance. "
                    "Consider deleting files in /dev/shm or increasing its "
                    "size with "
                    "--shm-size in Docker. To ignore this warning, "
                    "set RAY_OBJECT_STORE_ALLOW_SLOW_STORAGE=1.".format(
                        object_store_memory / 1e9, shm_avail / 1e9
                    )
                )
            else:
                # Small store or slow storage explicitly allowed: fall back to
                # the (disk-backed) temp dir and warn about the perf impact.
                plasma_directory = ray._common.utils.get_user_temp_dir()
                logger.warning(
                    "WARNING: The object store is using {} instead of "
                    "/dev/shm because /dev/shm has only {} bytes available. "
                    "This will harm performance! You may be able to free up "
                    "space by deleting files in /dev/shm. If you are inside a "
                    "Docker container, you can increase /dev/shm size by "
                    "passing '--shm-size={:.2f}gb' to 'docker run' (or add it "
                    "to the run_options list in a Ray cluster config). Make "
                    "sure to set this to more than 30% of available RAM.".format(
                        ray._common.utils.get_user_temp_dir(),
                        shm_avail,
                        object_store_memory * (1.1) / (2**30),
                    )
                )
        else:
            plasma_directory = ray._common.utils.get_user_temp_dir()
        # Do some sanity checks.
        # NOTE: this memory check only runs when the directory is auto-chosen;
        # a user-specified plasma_directory skips it (see the log line below).
        if object_store_memory > system_memory:
            raise ValueError(
                "The requested object store memory size is greater "
                "than the total available memory."
            )
    else:
        plasma_directory = os.path.abspath(plasma_directory)
        logger.info("object_store_memory is not verified when plasma_directory is set.")
    if not os.path.isdir(plasma_directory):
        raise ValueError(
            f"The plasma directory file {plasma_directory} does not exist or is not a directory."
        )
    # NOTE(review): plasma_directory has been assigned on every path above, so
    # this guard appears unreachable; kept as a defensive check.
    if huge_pages and plasma_directory is None:
        raise ValueError(
            "If huge_pages is True, then the "
            "plasma_directory argument must be provided."
        )
    if object_store_memory < ray_constants.OBJECT_STORE_MINIMUM_MEMORY_BYTES:
        raise ValueError(
            "Attempting to cap object store memory usage at {} "
            "bytes, but the minimum allowed is {} bytes.".format(
                object_store_memory, ray_constants.OBJECT_STORE_MINIMUM_MEMORY_BYTES
            )
        )
    if (
        sys.platform == "darwin"
        and object_store_memory > ray_constants.MAC_DEGRADED_PERF_MMAP_SIZE_LIMIT
        and os.environ.get("RAY_ENABLE_MAC_LARGE_OBJECT_STORE") != "1"
    ):
        raise ValueError(
            "The configured object store size ({:.4}GiB) exceeds "
            "the optimal size on Mac ({:.4}GiB). "
            "This will harm performance! There is a known issue where "
            "Ray's performance degrades with object store size greater"
            " than {:.4}GB on a Mac."
            "To reduce the object store capacity, specify"
            "`object_store_memory` when calling ray.init() or ray start."
            "To ignore this warning, "
            "set RAY_ENABLE_MAC_LARGE_OBJECT_STORE=1.".format(
                object_store_memory / 2**30,
                ray_constants.MAC_DEGRADED_PERF_MMAP_SIZE_LIMIT / 2**30,
                ray_constants.MAC_DEGRADED_PERF_MMAP_SIZE_LIMIT / 2**30,
            )
        )
    if fallback_directory is None:
        fallback_directory = temp_dir
    else:
        fallback_directory = os.path.abspath(fallback_directory)
        if not os.path.isdir(fallback_directory):
            raise ValueError(
                f"The fallback directory file {fallback_directory} does not exist or is not a directory."
            )
    # Print the object store memory using two decimal places.
    logger.debug(
        "Determine to start the Plasma object store with {} GB memory "
        "using {} and fallback to {}".format(
            round(object_store_memory / 10**9, 2),
            plasma_directory,
            fallback_directory,
        )
    )
    return plasma_directory, fallback_directory, object_store_memory
def start_monitor(
    gcs_address: str,
    logs_dir: str,
    stdout_filepath: Optional[str] = None,
    stderr_filepath: Optional[str] = None,
    autoscaling_config: Optional[str] = None,
    fate_share: Optional[bool] = None,
    max_bytes: int = 0,
    backup_count: int = 0,
    monitor_ip: Optional[str] = None,
    autoscaler_v2: bool = False,
):
    """Run a process to monitor the other processes.
    Args:
        gcs_address: The address of GCS server.
        logs_dir: The path to the log directory.
        stdout_filepath: The file path to dump monitor stdout.
            If None, stdout is not redirected.
        stderr_filepath: The file path to dump monitor stderr.
            If None, stderr is not redirected.
        autoscaling_config: path to autoscaling config file.
        max_bytes: Log rotation parameter. Corresponding to
            RotatingFileHandler's maxBytes.
        backup_count: Log rotation parameter. Corresponding to
            RotatingFileHandler's backupCount.
        monitor_ip: IP address of the machine that the monitor will be
            run on. Can be excluded, but required for autoscaler metrics.
    Returns:
        ProcessInfo for the process that was started.
    """
    assert gcs_address is not None
    # Pick the v1 or v2 autoscaler entrypoint.
    monitor_dir = AUTOSCALER_V2_DIR if autoscaler_v2 else AUTOSCALER_PRIVATE_DIR
    entrypoint = os.path.join(RAY_PATH, monitor_dir, "monitor.py")
    command = [
        sys.executable,
        "-u",
        entrypoint,
        f"--logs-dir={logs_dir}",
        f"--logging-rotate-bytes={max_bytes}",
        f"--logging-rotate-backup-count={backup_count}",
        f"--gcs-address={gcs_address}",
    ]
    if stdout_filepath:
        command.append(f"--stdout-filepath={stdout_filepath}")
    if stderr_filepath:
        command.append(f"--stderr-filepath={stderr_filepath}")
    if stdout_filepath is None and stderr_filepath is None:
        # No file redirection: clear the log filename so records go to stderr,
        # prefixed with the component name.
        command.append("--logging-filename=")
        stderr_format = ray_constants.LOGGER_FORMAT_STDERR.format(
            component=ray_constants.PROCESS_TYPE_MONITOR
        )
        command.append(f"--logging-format={stderr_format}")
    if autoscaling_config:
        command.append(f"--autoscaling-config={autoscaling_config}")
    if monitor_ip:
        command.append(f"--monitor-ip={monitor_ip}")
    # When redirecting to files, the monitor manages its own log files; hand
    # the child /dev/null so nothing is duplicated through the pipe.
    stdout_file = open(os.devnull, "w") if stdout_filepath else None
    stderr_file = open(os.devnull, "w") if stderr_filepath else None
    return start_ray_process(
        command,
        ray_constants.PROCESS_TYPE_MONITOR,
        stdout_file=stdout_file,
        stderr_file=stderr_file,
        fate_share=fate_share,
    )
def start_ray_client_server(
    address: str,
    ray_client_server_ip: str,
    ray_client_server_port: int,
    # NOTE(review): the two file args below are documented as file handles but
    # annotated Optional[int] — possibly raw file descriptors; confirm callers.
    stdout_file: Optional[int] = None,
    stderr_file: Optional[int] = None,
    redis_username: Optional[str] = None,
    redis_password: Optional[str] = None,
    fate_share: Optional[bool] = None,
    runtime_env_agent_address: Optional[str] = None,
    server_type: str = "proxy",
    serialized_runtime_env_context: Optional[str] = None,
):
    """Run the server process of the Ray client.
    Args:
        address: The address of the cluster.
        ray_client_server_ip: Host IP the Ray client server listens on.
        ray_client_server_port: Port the Ray client server listens on.
        stdout_file: A file handle opened for writing to redirect stdout to. If
            no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to. If
            no redirection should happen, then this should be None.
        redis_username: The username of the Redis server.
        redis_password: The password of the Redis server.
        runtime_env_agent_address: Address to the Runtime Env Agent listens on via HTTP.
            Only needed when server_type == "proxy".
        server_type: Whether to start the proxy version of Ray Client.
        serialized_runtime_env_context (str|None): If specified, the serialized
            runtime_env_context to start the client server in.
    Returns:
        ProcessInfo for the process that was started.
    """
    # Resolve the repository root relative to this file to locate the
    # setup-worker script that wraps the client server.
    root_ray_dir = Path(__file__).resolve().parents[1]
    setup_worker_path = os.path.join(
        root_ray_dir, "_private", "workers", ray_constants.SETUP_WORKER_FILENAME
    )
    ray_client_server_host = ray_client_server_ip
    command = [
        sys.executable,
        setup_worker_path,
        "-m",
        "ray.util.client.server",
        f"--address={address}",
        f"--host={ray_client_server_host}",
        f"--port={ray_client_server_port}",
        f"--mode={server_type}",
        f"--language={Language.Name(Language.PYTHON)}",
    ]
    if redis_username:
        command.append(f"--redis-username={redis_username}")
    if redis_password:
        command.append(f"--redis-password={redis_password}")
    if serialized_runtime_env_context:
        command.append(
            f"--serialized-runtime-env-context={serialized_runtime_env_context}"  # noqa: E501
        )
    if server_type == "proxy":
        # NOTE(review): raises TypeError (not AssertionError) when the address
        # is None for a proxy server; after this assert the `if` below is
        # always true for proxy mode — the extra check only matters for
        # non-proxy callers that happen to pass an address.
        assert len(runtime_env_agent_address) > 0
    if runtime_env_agent_address:
        command.append(f"--runtime-env-agent-address={runtime_env_agent_address}")
    process_info = start_ray_process(
        command,
        ray_constants.PROCESS_TYPE_RAY_CLIENT_SERVER,
        stdout_file=stdout_file,
        stderr_file=stderr_file,
        fate_share=fate_share,
    )
    return process_info
def _is_raylet_process(cmdline: Optional[List[str]]) -> bool:
"""Check if the command line belongs to a raylet process.
Args:
cmdline: List of command line arguments or None
Returns:
bool: True if this is a raylet process, False otherwise
"""
if cmdline is None or len(cmdline) == 0:
return False
executable = os.path.basename(cmdline[0])
return "raylet" in executable
| ConsolePopen |
python | pytorch__pytorch | torch/_dynamo/variables/misc.py | {
"start": 27368,
"end": 28227
} | class ____(VariableTracker):
# If the cell existed before Dynamo tracing started, this will be the
# VariableTracker that represents the cell content.
#
# Note that all mutation to the cell (i.e., its content) will be buffered in
# SideEffects, rather than being reflected here. One can think of
# `CellVariable` as a special case for `UserDefinedObjectVariable`.
pre_existing_contents: Optional[VariableTracker]
# This is set when this cell can be referenced via `LOAD/STORE_DEREF` in the
# root frame via this name (e.g., the name is in `co_cellvars/co_freevars`).
local_name: Optional[str] = None
def __init__(
self, pre_existing_contents: Optional[VariableTracker] = None, **kwargs
) -> None:
super().__init__(**kwargs)
self.pre_existing_contents = pre_existing_contents
| CellVariable |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.