language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/links/step_function.py
|
{
"start": 1514,
"end": 2099
}
|
class ____(BaseAwsLink):
"""Helper class for constructing link to State Machine Execution details page."""
name = "State Machine Executions Details"
key = "_state_machine_executions_details"
format_str = (
BASE_AWS_CONSOLE_LINK + "/states/home?region={region_name}#/v2/executions/details/{execution_arn}"
)
def format_link(self, *, execution_arn: str | None = None, **kwargs) -> str:
if not execution_arn:
return ""
return super().format_link(execution_arn=quote_plus(execution_arn), **kwargs)
|
StateMachineExecutionsDetailsLink
|
python
|
bokeh__bokeh
|
src/bokeh/models/map_plots.py
|
{
"start": 2622,
"end": 3435
}
|
class ____(Plot):
''' Abstract base class for map plot models.
'''
def __init__(self, *args, **kwargs) -> None:
from ..models.ranges import Range1d
for r in ('x_range', 'y_range'):
if r in kwargs and not isinstance(kwargs.get(r), Range1d):
raise ValueError(f"Invalid value for {r!r}, MapPlot ranges may only be Range1d, not data ranges")
super().__init__(*args, **kwargs)
@error(INCOMPATIBLE_MAP_RANGE_TYPE)
def _check_incompatible_map_range_type(self):
from ..models.ranges import Range1d
if self.x_range is not None and not isinstance(self.x_range, Range1d):
return f"{self!s}.x_range"
if self.y_range is not None and not isinstance(self.y_range, Range1d):
return f"{self!s}.y_range"
|
MapPlot
|
python
|
django__django
|
tests/sites_framework/models.py
|
{
"start": 137,
"end": 328
}
|
class ____(models.Model):
title = models.CharField(max_length=50)
objects = models.Manager()
on_site = CurrentSiteManager()
class Meta:
abstract = True
|
AbstractArticle
|
python
|
ashishps1__awesome-system-design-resources
|
implementations/python/rate_limiting/sliding_window_counter.py
|
{
"start": 13,
"end": 1493
}
|
class ____:
def __init__(self, window_size, max_requests):
self.window_size = window_size # Size of the sliding window in seconds
self.max_requests = max_requests # Maximum number of requests per window
self.current_window = time.time() // window_size
self.request_count = 0
self.previous_count = 0
def allow_request(self):
now = time.time()
window = now // self.window_size
# If we've moved to a new window, update the counts
if window != self.current_window:
self.previous_count = self.request_count
self.request_count = 0
self.current_window = window
# Calculate the weighted request count
window_elapsed = (now % self.window_size) / self.window_size
threshold = self.previous_count * (1 - window_elapsed) + self.request_count
# Check if we're within the limit
if threshold < self.max_requests:
self.request_count += 1
return True
return False
# Usage example
limiter = SlidingWindowCounter(window_size=60, max_requests=5) # 5 requests per minute
for _ in range(10):
print(limiter.allow_request()) # Will print True for the first 5 requests, then gradually become False
time.sleep(0.1) # Wait a bit between requests
time.sleep(30) # Wait for half the window to pass
print(limiter.allow_request()) # Might be True or False depending on the exact timing
|
SlidingWindowCounter
|
python
|
python-jsonschema__jsonschema
|
jsonschema/tests/test_exceptions.py
|
{
"start": 11006,
"end": 12995
}
|
class ____(TestCase):
def test_short_paths_are_better_matches(self):
shallow = exceptions.ValidationError("Oh no!", path=["baz"])
deep = exceptions.ValidationError("Oh yes!", path=["foo", "bar"])
match = max([shallow, deep], key=exceptions.relevance)
self.assertIs(match, shallow)
match = max([deep, shallow], key=exceptions.relevance)
self.assertIs(match, shallow)
def test_global_errors_are_even_better_matches(self):
shallow = exceptions.ValidationError("Oh no!", path=[])
deep = exceptions.ValidationError("Oh yes!", path=["foo"])
errors = sorted([shallow, deep], key=exceptions.relevance)
self.assertEqual(
[list(error.path) for error in errors],
[["foo"], []],
)
errors = sorted([deep, shallow], key=exceptions.relevance)
self.assertEqual(
[list(error.path) for error in errors],
[["foo"], []],
)
def test_weak_keywords_are_lower_priority(self):
weak = exceptions.ValidationError("Oh no!", path=[], validator="a")
normal = exceptions.ValidationError("Oh yes!", path=[], validator="b")
best_match = exceptions.by_relevance(weak="a")
match = max([weak, normal], key=best_match)
self.assertIs(match, normal)
match = max([normal, weak], key=best_match)
self.assertIs(match, normal)
def test_strong_keywords_are_higher_priority(self):
weak = exceptions.ValidationError("Oh no!", path=[], validator="a")
normal = exceptions.ValidationError("Oh yes!", path=[], validator="b")
strong = exceptions.ValidationError("Oh fine!", path=[], validator="c")
best_match = exceptions.by_relevance(weak="a", strong="c")
match = max([weak, normal, strong], key=best_match)
self.assertIs(match, strong)
match = max([strong, normal, weak], key=best_match)
self.assertIs(match, strong)
|
TestByRelevance
|
python
|
walkccc__LeetCode
|
solutions/789. Escape The Ghosts/789.py
|
{
"start": 0,
"end": 253
}
|
class ____:
def escapeGhosts(self, ghosts: list[list[int]], target: list[int]) -> bool:
ghostSteps = min(abs(x - target[0]) +
abs(y - target[1]) for x, y in ghosts)
return abs(target[0]) + abs(target[1]) < ghostSteps
|
Solution
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_sensors.py
|
{
"start": 11684,
"end": 25218
}
|
class ____(NonLaunchableGraphQLContextTestMatrix):
@pytest.mark.parametrize(
"sensor_name, expected_type",
[
("always_no_config_sensor_with_tags_and_metadata", "STANDARD"),
("run_status", "RUN_STATUS"),
("single_asset_sensor", "ASSET"),
("many_asset_sensor", "MULTI_ASSET"),
("the_failure_sensor", "RUN_STATUS"),
],
)
def test_sensor_types(
self, graphql_context: WorkspaceRequestContext, sensor_name, expected_type
):
sensor_selector = infer_sensor_selector(graphql_context, sensor_name)
result = execute_dagster_graphql(
graphql_context,
GET_SENSOR_QUERY,
variables={"sensorSelector": sensor_selector},
)
assert result.data
assert result.data["sensorOrError"]
assert result.data["sensorOrError"]["__typename"] == "Sensor"
sensor = result.data["sensorOrError"]
assert sensor["sensorType"] == expected_type
def test_dry_run(self, graphql_context: WorkspaceRequestContext):
instigator_selector = infer_sensor_selector(
graphql_context, "always_no_config_sensor_with_tags_and_metadata"
)
result = execute_dagster_graphql(
graphql_context,
SENSOR_DRY_RUN_MUTATION,
variables={"selectorData": instigator_selector, "cursor": "blah"},
)
assert result.data
assert result.data["sensorDryRun"]["__typename"] == "DryRunInstigationTick"
evaluation_result = result.data["sensorDryRun"]["evaluationResult"]
assert evaluation_result["cursor"] == "blah"
assert len(evaluation_result["runRequests"]) == 1
assert evaluation_result["runRequests"][0]["runConfigYaml"] == "{}\n"
assert evaluation_result["skipReason"] is None
assert evaluation_result["error"] is None
assert evaluation_result["dynamicPartitionsRequests"] == []
def test_dry_run_with_run_key(self, graphql_context: WorkspaceRequestContext):
instigator_selector = infer_sensor_selector(graphql_context, "run_key_sensor")
result = execute_dagster_graphql(
graphql_context,
SENSOR_DRY_RUN_MUTATION,
variables={"selectorData": instigator_selector, "cursor": "blah"},
)
assert result.data
assert result.data["sensorDryRun"]["__typename"] == "DryRunInstigationTick"
evaluation_result = result.data["sensorDryRun"]["evaluationResult"]
assert evaluation_result["cursor"] == "blah"
assert len(evaluation_result["runRequests"]) == 1
assert evaluation_result["runRequests"][0]["runConfigYaml"] == "{}\n"
assert evaluation_result["skipReason"] is None
assert evaluation_result["error"] is None
assert evaluation_result["dynamicPartitionsRequests"] == []
graphql_context.instance.add_run(
DagsterRun(
job_name="run_key_sensor",
run_id=make_new_run_id(),
tags={RUN_KEY_TAG: "the_key", SENSOR_NAME_TAG: "run_key_sensor"},
)
)
result = execute_dagster_graphql(
graphql_context,
SENSOR_DRY_RUN_MUTATION,
variables={"selectorData": instigator_selector, "cursor": "blah"},
)
assert result.data
assert result.data["sensorDryRun"]["__typename"] == "DryRunInstigationTick"
evaluation_result = result.data["sensorDryRun"]["evaluationResult"]
# no more run run requests because the key matches
assert len(evaluation_result["runRequests"]) == 0
def test_dry_run_with_dynamic_partition_requests(
self, graphql_context: WorkspaceRequestContext
):
instigator_selector = infer_sensor_selector(
graphql_context, "dynamic_partition_requesting_sensor"
)
result = execute_dagster_graphql(
graphql_context,
SENSOR_DRY_RUN_MUTATION,
variables={"selectorData": instigator_selector, "cursor": "blah"},
)
assert result.data
assert result.data["sensorDryRun"]["__typename"] == "DryRunInstigationTick"
evaluation_result = result.data["sensorDryRun"]["evaluationResult"]
assert evaluation_result["cursor"] == "blah"
assert len(evaluation_result["runRequests"]) == 1
assert evaluation_result["runRequests"][0]["runConfigYaml"] == "{}\n"
assert evaluation_result["skipReason"] is None
assert evaluation_result["error"] is None
assert len(evaluation_result["dynamicPartitionsRequests"]) == 2
assert evaluation_result["dynamicPartitionsRequests"][0]["partitionKeys"] == [
"new_key",
"new_key2",
"existent_key",
]
assert evaluation_result["dynamicPartitionsRequests"][0]["partitionsDefName"] == "foo"
assert (
evaluation_result["dynamicPartitionsRequests"][0]["type"]
== GrapheneDynamicPartitionsRequestType.ADD_PARTITIONS
)
assert evaluation_result["dynamicPartitionsRequests"][1]["partitionKeys"] == [
"old_key",
"nonexistent_key",
]
assert evaluation_result["dynamicPartitionsRequests"][1]["partitionsDefName"] == "foo"
assert (
evaluation_result["dynamicPartitionsRequests"][1]["type"]
== GrapheneDynamicPartitionsRequestType.DELETE_PARTITIONS
)
def test_dry_run_failure(self, graphql_context: WorkspaceRequestContext):
instigator_selector = infer_sensor_selector(graphql_context, "always_error_sensor")
result = execute_dagster_graphql(
graphql_context,
SENSOR_DRY_RUN_MUTATION,
variables={"selectorData": instigator_selector, "cursor": "blah"},
)
assert result.data
assert result.data["sensorDryRun"]["__typename"] == "DryRunInstigationTick"
evaluation_result = result.data["sensorDryRun"]["evaluationResult"]
assert not evaluation_result["runRequests"]
assert not evaluation_result["skipReason"]
assert evaluation_result["dynamicPartitionsRequests"] is None
assert (
"Error occurred during the execution of evaluation_fn"
in evaluation_result["error"]["message"]
)
def test_dry_run_skip(self, graphql_context: WorkspaceRequestContext):
instigator_selector = infer_sensor_selector(graphql_context, "never_no_config_sensor")
result = execute_dagster_graphql(
graphql_context,
SENSOR_DRY_RUN_MUTATION,
variables={"selectorData": instigator_selector, "cursor": "blah"},
)
assert result.data
assert result.data["sensorDryRun"]["__typename"] == "DryRunInstigationTick"
evaluation_result = result.data["sensorDryRun"]["evaluationResult"]
assert not evaluation_result["runRequests"]
assert evaluation_result["skipReason"] == "never"
assert not evaluation_result["error"]
def test_dry_run_non_existent_sensor(self, graphql_context: WorkspaceRequestContext):
unknown_instigator_selector = infer_sensor_selector(graphql_context, "sensor_doesnt_exist")
with pytest.raises(UserFacingGraphQLError, match="GrapheneSensorNotFoundError"):
execute_dagster_graphql(
graphql_context,
SENSOR_DRY_RUN_MUTATION,
variables={"selectorData": unknown_instigator_selector, "cursor": "blah"},
)
unknown_repo_selector = {**unknown_instigator_selector}
unknown_repo_selector["repositoryName"] = "doesnt_exist"
with pytest.raises(UserFacingGraphQLError, match="GrapheneSensorNotFoundError"):
execute_dagster_graphql(
graphql_context,
SENSOR_DRY_RUN_MUTATION,
variables={"selectorData": unknown_repo_selector, "cursor": "blah"},
)
unknown_repo_location_selector = {**unknown_instigator_selector}
unknown_repo_location_selector["repositoryLocationName"] = "doesnt_exist"
with pytest.raises(UserFacingGraphQLError, match="GrapheneRepositoryLocationNotFound"):
execute_dagster_graphql(
graphql_context,
SENSOR_DRY_RUN_MUTATION,
variables={"selectorData": unknown_repo_location_selector, "cursor": "blah"},
)
def test_dry_run_cursor_updates(self, graphql_context: WorkspaceRequestContext):
# Ensure that cursor does not update between dry runs
selector = infer_sensor_selector(graphql_context, "update_cursor_sensor")
result = execute_dagster_graphql(
graphql_context,
SENSOR_DRY_RUN_MUTATION,
variables={"selectorData": selector, "cursor": None},
)
assert result.data
assert result.data["sensorDryRun"]["__typename"] == "DryRunInstigationTick"
evaluation_result = result.data["sensorDryRun"]["evaluationResult"]
assert evaluation_result["cursor"] == "1"
result = execute_dagster_graphql(
graphql_context,
GET_SENSOR_CURSOR_QUERY,
variables={"sensorSelector": selector},
)
assert result.data
assert result.data["sensorOrError"]["__typename"] == "Sensor"
sensor = result.data["sensorOrError"]
cursor = sensor["sensorState"]["typeSpecificData"]["lastCursor"]
assert not cursor
def test_get_sensors(self, graphql_context: WorkspaceRequestContext, snapshot):
selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql(
graphql_context,
GET_SENSORS_QUERY,
variables={"repositorySelector": selector},
)
assert result.data
assert result.data["sensorsOrError"]
assert result.data["sensorsOrError"]["__typename"] == "Sensors"
results = result.data["sensorsOrError"]["results"]
# Snapshot is different for test_dict_repo because it does not contain any asset jobs,
# so the sensor targets for sensors with asset selections differ
if selector["repositoryName"] != "test_dict_repo":
snapshot.assert_match(results)
def test_get_sensors_filtered(self, graphql_context: WorkspaceRequestContext, snapshot):
selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql(
graphql_context,
GET_SENSORS_BY_STATUS_QUERY,
variables={"repositorySelector": selector, "status": "RUNNING"},
)
assert result.data
assert result.data["sensorsOrError"]
assert result.data["sensorsOrError"]["__typename"] == "Sensors"
results = result.data["sensorsOrError"]["results"]
snapshot.assert_match(results)
# running status includes automatically running sensors
assert "running_in_code_sensor" in {
sensor["name"] for sensor in result.data["sensorsOrError"]["results"]
}
result = execute_dagster_graphql(
graphql_context,
GET_SENSORS_BY_STATUS_QUERY,
variables={"repositorySelector": selector, "status": "STOPPED"},
)
assert result.data
assert result.data["sensorsOrError"]
assert result.data["sensorsOrError"]["__typename"] == "Sensors"
results = result.data["sensorsOrError"]["results"]
assert "running_in_code_sensor" not in {
sensor["name"] for sensor in result.data["sensorsOrError"]["results"]
}
def test_get_sensor(self, graphql_context: WorkspaceRequestContext, snapshot):
sensor_selector = infer_sensor_selector(
graphql_context, "always_no_config_sensor_with_tags_and_metadata"
)
result = execute_dagster_graphql(
graphql_context,
GET_SENSOR_QUERY,
variables={"sensorSelector": sensor_selector},
)
assert result.data
assert result.data["sensorOrError"]
assert result.data["sensorOrError"]["__typename"] == "Sensor"
sensor = result.data["sensorOrError"]
snapshot.assert_match(sensor)
assert sensor["sensorType"] == "STANDARD"
assert sensor["tags"] == [{"key": "foo", "value": "bar"}]
assert sensor["metadataEntries"] == [{"label": "foo", "text": "bar"}]
def test_sensor_owners(self, graphql_context: WorkspaceRequestContext):
sensor_selector = infer_sensor_selector(graphql_context, "owned_sensor")
result = execute_dagster_graphql(
graphql_context,
GET_SENSOR_QUERY,
variables={"sensorSelector": sensor_selector},
)
assert result.data
assert result.data["sensorOrError"]
assert result.data["sensorOrError"]["__typename"] == "Sensor"
sensor = result.data["sensorOrError"]
assert sensor["owners"] is not None
assert len(sensor["owners"]) == 2
# Check the user owner
user_owner = None
team_owner = None
for owner in sensor["owners"]:
if owner.get("email"):
user_owner = owner
elif owner.get("team"):
team_owner = owner
assert user_owner is not None
assert user_owner["email"] == "test@elementl.com"
assert team_owner is not None
assert team_owner["team"] == "foo"
|
TestSensors
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 32748,
"end": 32972
}
|
class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("ABUSE", "DUPLICATE", "OFF_TOPIC", "OUTDATED", "RESOLVED", "SPAM")
|
ReportedContentClassifiers
|
python
|
apache__airflow
|
airflow-core/tests/unit/listeners/file_write_listener.py
|
{
"start": 979,
"end": 1917
}
|
class ____:
def __init__(self, path):
self.path = path
def write(self, line: str):
with open(self.path, "a") as f:
f.write(line + "\n")
@hookimpl
def on_task_instance_running(self, previous_state, task_instance):
self.write("on_task_instance_running")
@hookimpl
def on_task_instance_success(self, previous_state, task_instance):
self.write("on_task_instance_success")
@hookimpl
def on_task_instance_failed(self, previous_state, task_instance, error: None | str | BaseException):
self.write("on_task_instance_failed")
@hookimpl
def on_starting(self, component):
if isinstance(component, TaskCommandMarker):
self.write("on_starting")
@hookimpl
def before_stopping(self, component):
if isinstance(component, TaskCommandMarker):
self.write("before_stopping")
def clear():
pass
|
FileWriteListener
|
python
|
django__django
|
tests/delete/models.py
|
{
"start": 5058,
"end": 5194
}
|
class ____(models.Model):
desc = models.TextField(null=True)
# This model is used to test a duplicate query regression (#25685)
|
Avatar
|
python
|
django__django
|
django/template/defaulttags.py
|
{
"start": 8411,
"end": 10370
}
|
class ____(Node):
child_nodelists = ("nodelist_true", "nodelist_false")
def __init__(self, nodelist_true, nodelist_false, *varlist):
self.nodelist_true = nodelist_true
self.nodelist_false = nodelist_false
self._varlist = varlist
def render(self, context):
# Init state storage
state_frame = self._get_context_stack_frame(context)
state_frame.setdefault(self)
nodelist_true_output = None
if self._varlist:
# Consider multiple parameters. This behaves like an OR evaluation
# of the multiple variables.
compare_to = [
var.resolve(context, ignore_failures=True) for var in self._varlist
]
else:
# The "{% ifchanged %}" syntax (without any variables) compares
# the rendered output.
compare_to = nodelist_true_output = self.nodelist_true.render(context)
if compare_to != state_frame[self]:
state_frame[self] = compare_to
# render true block if not already rendered
return nodelist_true_output or self.nodelist_true.render(context)
elif self.nodelist_false:
return self.nodelist_false.render(context)
return ""
def _get_context_stack_frame(self, context):
# The Context object behaves like a stack where each template tag can
# create a new scope. Find the place where to store the state to detect
# changes.
if "forloop" in context:
# Ifchanged is bound to the local for loop.
# When there is a loop-in-loop, the state is bound to the inner
# loop, so it resets when the outer loop continues.
return context["forloop"]
else:
# Using ifchanged outside loops. Effectively this is a no-op
# because the state is associated with 'self'.
return context.render_context
|
IfChangedNode
|
python
|
kamyu104__LeetCode-Solutions
|
Python/diameter-of-n-ary-tree.py
|
{
"start": 213,
"end": 1048
}
|
class ____(object):
def diameter(self, root):
"""
:type root: 'Node'
:rtype: int
"""
def iter_dfs(root):
result = [0]*2
stk = [(1, (root, result))]
while stk:
step, params = stk.pop()
if step == 1:
node, ret = params
for child in reversed(node.children):
ret2 = [0]*2
stk.append((2, (ret2, ret)))
stk.append((1, (child, ret2)))
else:
ret2, ret = params
ret[0] = max(ret[0], ret2[0], ret[1]+ret2[1]+1)
ret[1] = max(ret[1], ret2[1]+1)
return result
return iter_dfs(root)[0]
# Time: O(n)
# Space: O(h)
|
Solution
|
python
|
astral-sh__uv
|
crates/uv-python/fetch-download-metadata.py
|
{
"start": 5643,
"end": 16262
}
|
class ____(Finder):
implementation = ImplementationName.CPYTHON
RELEASE_URL = (
"https://api.github.com/repos/astral-sh/python-build-standalone/releases"
)
FLAVOR_PREFERENCES = [
"install_only_stripped",
"install_only",
"shared-pgo",
"shared-noopt",
"static-noopt",
]
SPECIAL_TRIPLES = {
"macos": "x86_64-apple-darwin",
"linux64": "x86_64-unknown-linux-gnu",
"windows-amd64": "x86_64-pc-windows",
"windows-x86": "i686-pc-windows",
"windows-amd64-shared": "x86_64-pc-windows",
"windows-x86-shared": "i686-pc-windows",
"linux64-musl": "x86_64-unknown-linux-musl",
}
# Normalized mappings to match the Rust types
ARCH_MAP = {
"ppc64": "powerpc64",
"ppc64le": "powerpc64le",
}
_filename_re = re.compile(
r"""(?x)
^
cpython-
(?P<ver>\d+\.\d+\.\d+(?:(?:a|b|rc)\d+)?)(?:\+\d+)?\+
(?P<date>\d+)-
# Note we lookahead to avoid matching "debug" as a triple as we'd
# prefer it matches as a build option; we could enumerate all known
# build options instead but this is the easy path forward
(?P<triple>[a-z\d_]+-[a-z\d]+(?>-[a-z\d]+)?-(?!debug(?:-|$))[a-z\d_]+)-
(?:(?P<build_options>.+)-)?
(?P<flavor>[a-z_]+)?
\.tar\.(?:gz|zst)
$
"""
)
_legacy_filename_re = re.compile(
r"""(?x)
^
cpython-
(?P<ver>\d+\.\d+\.\d+(?:(?:a|b|rc)\d+)?)(?:\+\d+)?-
(?P<triple>[a-z\d_-]+)-
(?P<build_options>(debug|pgo|noopt|lto|pgo\+lto))?-
(?P<date>[a-zA-z\d]+)
\.tar\.(?:gz|zst)
$
"""
)
def __init__(self, client: httpx.AsyncClient):
self.client = client
async def find(self) -> list[PythonDownload]:
downloads = await self._fetch_downloads()
await self._fetch_checksums(downloads, n=20)
return downloads
async def _fetch_downloads(self, pages: int = 100) -> list[PythonDownload]:
"""Fetch all the indygreg downloads from the release API."""
downloads_by_version: dict[Version, list[PythonDownload]] = {}
# Collect all available Python downloads
for page in range(1, pages + 1):
logging.info("Fetching CPython release page %d", page)
resp = await self.client.get(
self.RELEASE_URL, params={"page": page, "per_page": 10}
)
resp.raise_for_status()
rows = resp.json()
if not rows:
break
for row in rows:
# Sort the assets to ensure deterministic results
row["assets"].sort(key=lambda asset: asset["browser_download_url"])
for asset in row["assets"]:
download = self._parse_download_asset(asset)
if download is None:
continue
if (
download.release < CPYTHON_MUSL_STATIC_RELEASE_END
and download.triple.libc == "musl"
):
continue
logging.debug("Found %s (%s)", download.key(), download.filename)
downloads_by_version.setdefault(download.version, []).append(
download
)
# Collapse CPython variants to a single flavor per triple and variant
downloads = []
for version_downloads in downloads_by_version.values():
selected: dict[
tuple[PlatformTripleKey, Variant | None],
tuple[PythonDownload, tuple[int, int]],
] = {}
for download in version_downloads:
priority = self._get_priority(download)
existing = selected.get((download.triple.key(), download.variant))
if existing:
existing_download, existing_priority = existing
# Skip if we have a flavor with higher priority already (indicated by a smaller value)
if priority >= existing_priority:
logging.debug(
"Skipping %s (%s): lower priority than %s (%s)",
download.key(),
download.flavor,
existing_download.key(),
existing_download.flavor,
)
continue
selected[(download.triple.key(), download.variant)] = (
download,
priority,
)
# Drop the priorities
downloads.extend([download for download, _ in selected.values()])
return downloads
async def _fetch_checksums(self, downloads: list[PythonDownload], n: int) -> None:
"""Fetch the checksums for the given downloads."""
checksum_urls = set()
for download in downloads:
# Skip the newer releases where we got the hash from the GitHub API
if download.sha256:
continue
release_base_url = download.url.rsplit("/", maxsplit=1)[0]
checksum_url = release_base_url + "/SHA256SUMS"
checksum_urls.add(checksum_url)
async def fetch_checksums(url: str) -> httpx.Response | None:
try:
resp = await self.client.get(url)
resp.raise_for_status()
except httpx.HTTPStatusError as e:
if e.response.status_code == 404:
return None
raise
return resp
completed = 0
tasks = []
for batch in batched(checksum_urls, n):
logging.info(
"Fetching CPython checksums: %d/%d", completed, len(checksum_urls)
)
async with asyncio.TaskGroup() as tg:
for url in batch:
task = tg.create_task(fetch_checksums(url))
tasks.append(task)
completed += n
checksums = {}
for task in tasks:
resp = task.result()
if resp is None:
continue
lines = resp.text.splitlines()
for line in lines:
checksum, filename = line.split(" ", maxsplit=1)
filename = filename.strip()
checksums[filename] = checksum
for download in downloads:
if download.sha256:
continue
download.sha256 = checksums.get(download.filename)
def _parse_download_asset(self, asset: dict[str, Any]) -> PythonDownload | None:
"""Parse a python-build-standalone download asset into a PythonDownload object."""
url = asset["browser_download_url"]
# Ex)
# https://github.com/astral-sh/python-build-standalone/releases/download/20240107/cpython-3.12.1%2B20240107-aarch64-unknown-linux-gnu-lto-full.tar.zst
if url.endswith(".sha256"):
return None
release = int(url.rsplit("/")[-2])
filename = asset["name"]
sha256 = None
# On older versions, GitHub didn't backfill the digest.
if digest := asset["digest"]:
sha256 = digest.removeprefix("sha256:")
match = self._filename_re.match(filename) or self._legacy_filename_re.match(
filename
)
if match is None:
logging.debug("Skipping %s: no regex match", filename)
return None
groups = match.groupdict()
version = groups["ver"]
triple = groups["triple"]
build_options = groups.get("build_options")
flavor = groups.get("flavor", "full")
build_options = build_options.split("+") if build_options else []
variant = Variant.from_build_options(build_options)
version = Version.from_str(version)
triple = self._normalize_triple(triple)
if triple is None:
# Skip is logged in `_normalize_triple`
return None
return PythonDownload(
release=release,
version=version,
triple=triple,
flavor=flavor,
implementation=self.implementation,
filename=filename,
url=url,
build=str(release),
build_options=build_options,
variant=variant,
sha256=sha256,
)
def _normalize_triple(self, triple: str) -> PlatformTriple | None:
if "-static" in triple:
logging.debug("Skipping %r: static unsupported", triple)
return None
triple = self.SPECIAL_TRIPLES.get(triple, triple)
pieces = triple.split("-")
try:
arch = self._normalize_arch(pieces[0])
operating_system = self._normalize_os(pieces[2])
if pieces[2] == "linux":
# On linux, the triple has four segments, the last one is the libc
libc = pieces[3]
else:
libc = "none"
except IndexError:
logging.debug("Skipping %r: unknown triple", triple)
return None
return PlatformTriple(operating_system, arch, libc)
def _normalize_arch(self, arch: str) -> Arch:
arch = self.ARCH_MAP.get(arch, arch)
pieces = arch.split("_")
family = "_".join(pieces[:2])
variant = pieces[2] if len(pieces) > 2 else None
return Arch(family, variant)
def _normalize_os(self, os: str) -> str:
return os
def _get_priority(self, download: PythonDownload) -> tuple[int, int]:
"""
Returns the priority of a download, a lower score is better.
"""
flavor_priority = self._flavor_priority(download.flavor)
build_option_priority = self._build_option_priority(download.build_options)
return (flavor_priority, build_option_priority)
def _flavor_priority(self, flavor: str) -> int:
try:
priority = self.FLAVOR_PREFERENCES.index(flavor)
except ValueError:
priority = len(self.FLAVOR_PREFERENCES) + 1
return priority
def _build_option_priority(self, build_options: list[str]) -> int:
# Prefer optimized builds
return -1 * sum(
(
"lto" in build_options,
"pgo" in build_options,
"static" not in build_options,
)
)
|
CPythonFinder
|
python
|
pandas-dev__pandas
|
pandas/tests/io/formats/test_eng_formatting.py
|
{
"start": 278,
"end": 8454
}
|
class ____:
def test_eng_float_formatter2(self, float_frame):
df = float_frame
df.loc[5] = 0
set_eng_float_format()
repr(df)
set_eng_float_format(use_eng_prefix=True)
repr(df)
set_eng_float_format(accuracy=0)
repr(df)
def test_eng_float_formatter(self):
df = DataFrame({"A": [1.41, 141.0, 14100, 1410000.0]})
set_eng_float_format()
result = df.to_string()
expected = (
" A\n"
"0 1.410E+00\n"
"1 141.000E+00\n"
"2 14.100E+03\n"
"3 1.410E+06"
)
assert result == expected
set_eng_float_format(use_eng_prefix=True)
result = df.to_string()
expected = " A\n0 1.410\n1 141.000\n2 14.100k\n3 1.410M"
assert result == expected
set_eng_float_format(accuracy=0)
result = df.to_string()
expected = " A\n0 1E+00\n1 141E+00\n2 14E+03\n3 1E+06"
assert result == expected
def compare(self, formatter, input, output):
formatted_input = formatter(input)
assert formatted_input == output
def compare_all(self, formatter, in_out):
"""
Parameters:
-----------
formatter: EngFormatter under test
in_out: list of tuples. Each tuple = (number, expected_formatting)
It is tested if 'formatter(number) == expected_formatting'.
*number* should be >= 0 because formatter(-number) == fmt is also
tested. *fmt* is derived from *expected_formatting*
"""
for input, output in in_out:
self.compare(formatter, input, output)
self.compare(formatter, -input, "-" + output[1:])
def test_exponents_with_eng_prefix(self):
formatter = EngFormatter(accuracy=3, use_eng_prefix=True)
f = np.sqrt(2)
in_out = [
(f * 10**-24, " 1.414y"),
(f * 10**-23, " 14.142y"),
(f * 10**-22, " 141.421y"),
(f * 10**-21, " 1.414z"),
(f * 10**-20, " 14.142z"),
(f * 10**-19, " 141.421z"),
(f * 10**-18, " 1.414a"),
(f * 10**-17, " 14.142a"),
(f * 10**-16, " 141.421a"),
(f * 10**-15, " 1.414f"),
(f * 10**-14, " 14.142f"),
(f * 10**-13, " 141.421f"),
(f * 10**-12, " 1.414p"),
(f * 10**-11, " 14.142p"),
(f * 10**-10, " 141.421p"),
(f * 10**-9, " 1.414n"),
(f * 10**-8, " 14.142n"),
(f * 10**-7, " 141.421n"),
(f * 10**-6, " 1.414u"),
(f * 10**-5, " 14.142u"),
(f * 10**-4, " 141.421u"),
(f * 10**-3, " 1.414m"),
(f * 10**-2, " 14.142m"),
(f * 10**-1, " 141.421m"),
(f * 10**0, " 1.414"),
(f * 10**1, " 14.142"),
(f * 10**2, " 141.421"),
(f * 10**3, " 1.414k"),
(f * 10**4, " 14.142k"),
(f * 10**5, " 141.421k"),
(f * 10**6, " 1.414M"),
(f * 10**7, " 14.142M"),
(f * 10**8, " 141.421M"),
(f * 10**9, " 1.414G"),
(f * 10**10, " 14.142G"),
(f * 10**11, " 141.421G"),
(f * 10**12, " 1.414T"),
(f * 10**13, " 14.142T"),
(f * 10**14, " 141.421T"),
(f * 10**15, " 1.414P"),
(f * 10**16, " 14.142P"),
(f * 10**17, " 141.421P"),
(f * 10**18, " 1.414E"),
(f * 10**19, " 14.142E"),
(f * 10**20, " 141.421E"),
(f * 10**21, " 1.414Z"),
(f * 10**22, " 14.142Z"),
(f * 10**23, " 141.421Z"),
(f * 10**24, " 1.414Y"),
(f * 10**25, " 14.142Y"),
(f * 10**26, " 141.421Y"),
]
self.compare_all(formatter, in_out)
def test_exponents_without_eng_prefix(self):
formatter = EngFormatter(accuracy=4, use_eng_prefix=False)
f = np.pi
in_out = [
(f * 10**-24, " 3.1416E-24"),
(f * 10**-23, " 31.4159E-24"),
(f * 10**-22, " 314.1593E-24"),
(f * 10**-21, " 3.1416E-21"),
(f * 10**-20, " 31.4159E-21"),
(f * 10**-19, " 314.1593E-21"),
(f * 10**-18, " 3.1416E-18"),
(f * 10**-17, " 31.4159E-18"),
(f * 10**-16, " 314.1593E-18"),
(f * 10**-15, " 3.1416E-15"),
(f * 10**-14, " 31.4159E-15"),
(f * 10**-13, " 314.1593E-15"),
(f * 10**-12, " 3.1416E-12"),
(f * 10**-11, " 31.4159E-12"),
(f * 10**-10, " 314.1593E-12"),
(f * 10**-9, " 3.1416E-09"),
(f * 10**-8, " 31.4159E-09"),
(f * 10**-7, " 314.1593E-09"),
(f * 10**-6, " 3.1416E-06"),
(f * 10**-5, " 31.4159E-06"),
(f * 10**-4, " 314.1593E-06"),
(f * 10**-3, " 3.1416E-03"),
(f * 10**-2, " 31.4159E-03"),
(f * 10**-1, " 314.1593E-03"),
(f * 10**0, " 3.1416E+00"),
(f * 10**1, " 31.4159E+00"),
(f * 10**2, " 314.1593E+00"),
(f * 10**3, " 3.1416E+03"),
(f * 10**4, " 31.4159E+03"),
(f * 10**5, " 314.1593E+03"),
(f * 10**6, " 3.1416E+06"),
(f * 10**7, " 31.4159E+06"),
(f * 10**8, " 314.1593E+06"),
(f * 10**9, " 3.1416E+09"),
(f * 10**10, " 31.4159E+09"),
(f * 10**11, " 314.1593E+09"),
(f * 10**12, " 3.1416E+12"),
(f * 10**13, " 31.4159E+12"),
(f * 10**14, " 314.1593E+12"),
(f * 10**15, " 3.1416E+15"),
(f * 10**16, " 31.4159E+15"),
(f * 10**17, " 314.1593E+15"),
(f * 10**18, " 3.1416E+18"),
(f * 10**19, " 31.4159E+18"),
(f * 10**20, " 314.1593E+18"),
(f * 10**21, " 3.1416E+21"),
(f * 10**22, " 31.4159E+21"),
(f * 10**23, " 314.1593E+21"),
(f * 10**24, " 3.1416E+24"),
(f * 10**25, " 31.4159E+24"),
(f * 10**26, " 314.1593E+24"),
]
self.compare_all(formatter, in_out)
def test_rounding(self):
formatter = EngFormatter(accuracy=3, use_eng_prefix=True)
in_out = [
(5.55555, " 5.556"),
(55.5555, " 55.556"),
(555.555, " 555.555"),
(5555.55, " 5.556k"),
(55555.5, " 55.556k"),
(555555, " 555.555k"),
]
self.compare_all(formatter, in_out)
formatter = EngFormatter(accuracy=1, use_eng_prefix=True)
in_out = [
(5.55555, " 5.6"),
(55.5555, " 55.6"),
(555.555, " 555.6"),
(5555.55, " 5.6k"),
(55555.5, " 55.6k"),
(555555, " 555.6k"),
]
self.compare_all(formatter, in_out)
formatter = EngFormatter(accuracy=0, use_eng_prefix=True)
in_out = [
(5.55555, " 6"),
(55.5555, " 56"),
(555.555, " 556"),
(5555.55, " 6k"),
(55555.5, " 56k"),
(555555, " 556k"),
]
self.compare_all(formatter, in_out)
formatter = EngFormatter(accuracy=3, use_eng_prefix=True)
result = formatter(0)
assert result == " 0.000"
def test_nan(self):
# Issue #11981
formatter = EngFormatter(accuracy=1, use_eng_prefix=True)
result = formatter(np.nan)
assert result == "NaN"
df = DataFrame(
{
"a": [1.5, 10.3, 20.5],
"b": [50.3, 60.67, 70.12],
"c": [100.2, 101.33, 120.33],
}
)
pt = df.pivot_table(values="a", index="b", columns="c")
set_eng_float_format(accuracy=1)
result = pt.to_string()
assert "NaN" in result
def test_inf(self):
# Issue #11981
formatter = EngFormatter(accuracy=1, use_eng_prefix=True)
result = formatter(np.inf)
assert result == "inf"
|
TestEngFormatter
|
python
|
hynek__structlog
|
src/structlog/processors.py
|
{
"start": 20945,
"end": 23554
}
|
class ____(enum.Enum):
"""
Callsite parameters that can be added to an event dictionary with the
`structlog.processors.CallsiteParameterAdder` processor class.
The string values of the members of this enum will be used as the keys for
the callsite parameters in the event dictionary.
.. versionadded:: 21.5.0
.. versionadded:: 25.5.0
`QUAL_NAME` parameter.
"""
#: The full path to the python source file of the callsite.
PATHNAME = "pathname"
#: The basename part of the full path to the python source file of the
#: callsite.
FILENAME = "filename"
#: The python module the callsite was in. This mimics the module attribute
#: of `logging.LogRecord` objects and will be the basename, without
#: extension, of the full path to the python source file of the callsite.
MODULE = "module"
#: The name of the function that the callsite was in.
FUNC_NAME = "func_name"
#: The qualified name of the callsite (includes scope and class names).
#: Requires Python 3.11+.
QUAL_NAME = "qual_name"
#: The line number of the callsite.
LINENO = "lineno"
#: The ID of the thread the callsite was executed in.
THREAD = "thread"
#: The name of the thread the callsite was executed in.
THREAD_NAME = "thread_name"
#: The ID of the process the callsite was executed in.
PROCESS = "process"
#: The name of the process the callsite was executed in.
PROCESS_NAME = "process_name"
def _get_callsite_pathname(module: str, frame: FrameType) -> Any:
return frame.f_code.co_filename
def _get_callsite_filename(module: str, frame: FrameType) -> Any:
return os.path.basename(frame.f_code.co_filename)
def _get_callsite_module(module: str, frame: FrameType) -> Any:
return os.path.splitext(os.path.basename(frame.f_code.co_filename))[0]
def _get_callsite_func_name(module: str, frame: FrameType) -> Any:
return frame.f_code.co_name
def _get_callsite_qual_name(module: str, frame: FrameType) -> Any:
return frame.f_code.co_qualname # will crash on Python <3.11
def _get_callsite_lineno(module: str, frame: FrameType) -> Any:
return frame.f_lineno
def _get_callsite_thread(module: str, frame: FrameType) -> Any:
return threading.get_ident()
def _get_callsite_thread_name(module: str, frame: FrameType) -> Any:
return threading.current_thread().name
def _get_callsite_process(module: str, frame: FrameType) -> Any:
return os.getpid()
def _get_callsite_process_name(module: str, frame: FrameType) -> Any:
return get_processname()
|
CallsiteParameter
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeVarTuple27.py
|
{
"start": 225,
"end": 945
}
|
class ____(Generic[*Ts, T]): ...
def deco1(x: Callable[[*tuple[*Ts, int]], None]) -> tuple[*Ts]: ...
def deco2(x: Callable[[*tuple[*Ts, str]], None]) -> tuple[*Ts]: ...
def deco3(x: Callable[[*tuple[str, int]], None]) -> None: ...
def deco4(x: Callable[[*Ts, T], None]) -> A[*Ts, T]:
return A()
def func1(a: str, b: int) -> None: ...
def func2(a: str, b: str, c: int) -> None: ...
v1 = deco1(func1)
reveal_type(v1, expected_text="tuple[str]")
v2 = deco1(func2)
reveal_type(v2, expected_text="tuple[str, str]")
# This should generate an error.
deco2(func1)
deco3(func1)
v3 = deco4(func1)
reveal_type(v3, expected_text="A[str, int]")
v4 = deco4(func2)
reveal_type(v4, expected_text="A[str, str, int]")
|
A
|
python
|
django__django
|
tests/gis_tests/layermap/models.py
|
{
"start": 485,
"end": 816
}
|
class ____(NamedModel):
name_txt = models.TextField(default="")
name_short = models.CharField(max_length=5)
population = models.IntegerField()
density = models.DecimalField(max_digits=7, decimal_places=1)
dt = models.DateField()
point = models.PointField()
class Meta:
app_label = "layermap"
|
City
|
python
|
pypa__hatch
|
backend/src/hatchling/builders/plugin/interface.py
|
{
"start": 1007,
"end": 16217
}
|
class ____(ABC, Generic[BuilderConfigBound, PluginManagerBound]):
"""
Example usage:
```python tab="plugin.py"
from hatchling.builders.plugin.interface import BuilderInterface
class SpecialBuilder(BuilderInterface):
PLUGIN_NAME = "special"
...
```
```python tab="hooks.py"
from hatchling.plugin import hookimpl
from .plugin import SpecialBuilder
@hookimpl
def hatch_register_builder():
return SpecialBuilder
```
"""
PLUGIN_NAME = ""
"""The name used for selection."""
def __init__(
self,
root: str,
plugin_manager: PluginManagerBound | None = None,
config: dict[str, Any] | None = None,
metadata: ProjectMetadata | None = None,
app: Application | None = None,
) -> None:
self.__root = root
self.__plugin_manager = cast(PluginManagerBound, plugin_manager)
self.__raw_config = config
self.__metadata = metadata
self.__app = app
self.__config = cast(BuilderConfigBound, None)
self.__project_config: dict[str, Any] | None = None
self.__hatch_config: dict[str, Any] | None = None
self.__build_config: dict[str, Any] | None = None
self.__build_targets: list[str] | None = None
self.__target_config: dict[str, Any] | None = None
# Metadata
self.__project_id: str | None = None
def build(
self,
*,
directory: str | None = None,
versions: list[str] | None = None,
hooks_only: bool | None = None,
clean: bool | None = None,
clean_hooks_after: bool | None = None,
clean_only: bool | None = False,
) -> Generator[str, None, None]:
# Fail early for invalid project metadata
self.metadata.validate_fields()
if directory is None:
directory = (
self.config.normalize_build_directory(os.environ[BuildEnvVars.LOCATION])
if BuildEnvVars.LOCATION in os.environ
else self.config.directory
)
if not os.path.isdir(directory):
os.makedirs(directory)
version_api = self.get_version_api()
versions = versions or self.config.versions
if versions:
unknown_versions = set(versions) - set(version_api)
if unknown_versions:
message = (
f"Unknown versions for target `{self.PLUGIN_NAME}`: {', '.join(map(str, sorted(unknown_versions)))}"
)
raise ValueError(message)
if hooks_only is None:
hooks_only = env_var_enabled(BuildEnvVars.HOOKS_ONLY)
configured_build_hooks = self.get_build_hooks(directory)
build_hooks = list(configured_build_hooks.values())
if clean_only:
clean = True
elif clean is None:
clean = env_var_enabled(BuildEnvVars.CLEAN)
if clean:
if not hooks_only:
self.clean(directory, versions)
for build_hook in build_hooks:
build_hook.clean(versions)
if clean_only:
return
if clean_hooks_after is None:
clean_hooks_after = env_var_enabled(BuildEnvVars.CLEAN_HOOKS_AFTER)
for version in versions:
self.app.display_debug(f"Building `{self.PLUGIN_NAME}` version `{version}`")
build_data = self.get_default_build_data()
self.set_build_data_defaults(build_data)
# Allow inspection of configured build hooks and the order in which they run
build_data["build_hooks"] = tuple(configured_build_hooks)
# Execute all `initialize` build hooks
for build_hook in build_hooks:
build_hook.initialize(version, build_data)
if hooks_only:
self.app.display_debug(f"Only ran build hooks for `{self.PLUGIN_NAME}` version `{version}`")
continue
# Build the artifact
with self.config.set_build_data(build_data):
artifact = version_api[version](directory, **build_data)
# Execute all `finalize` build hooks
for build_hook in build_hooks:
build_hook.finalize(version, build_data, artifact)
if clean_hooks_after:
for build_hook in build_hooks:
build_hook.clean([version])
yield artifact
def recurse_included_files(self) -> Iterable[IncludedFile]:
"""
Returns a consistently generated series of file objects for every file that should be distributed. Each file
object has three `str` attributes:
- `path` - the absolute path
- `relative_path` - the path relative to the project root; will be an empty string for external files
- `distribution_path` - the path to be distributed as
"""
yield from self.recurse_selected_project_files()
yield from self.recurse_forced_files(self.config.get_force_include())
def recurse_selected_project_files(self) -> Iterable[IncludedFile]:
if self.config.only_include:
yield from self.recurse_explicit_files(self.config.only_include)
else:
yield from self.recurse_project_files()
def recurse_project_files(self) -> Iterable[IncludedFile]:
for root, dirs, files in safe_walk(self.root):
relative_path = get_relative_path(root, self.root)
dirs[:] = sorted(d for d in dirs if not self.config.directory_is_excluded(d, relative_path))
files.sort()
is_package = "__init__.py" in files
for f in files:
if f in EXCLUDED_FILES:
continue
relative_file_path = os.path.join(relative_path, f)
distribution_path = self.config.get_distribution_path(relative_file_path)
if self.config.path_is_reserved(distribution_path):
continue
if self.config.include_path(relative_file_path, is_package=is_package):
yield IncludedFile(
os.path.join(root, f), relative_file_path, self.config.get_distribution_path(relative_file_path)
)
def recurse_forced_files(self, inclusion_map: dict[str, str]) -> Iterable[IncludedFile]:
for source, target_path in inclusion_map.items():
external = not source.startswith(self.root)
if os.path.isfile(source):
yield IncludedFile(
source,
"" if external else os.path.relpath(source, self.root),
self.config.get_distribution_path(target_path),
)
elif os.path.isdir(source):
for root, dirs, files in safe_walk(source):
relative_directory = get_relative_path(root, source)
dirs[:] = sorted(d for d in dirs if d not in EXCLUDED_DIRECTORIES)
files.sort()
for f in files:
if f in EXCLUDED_FILES:
continue
relative_file_path = os.path.join(target_path, relative_directory, f)
distribution_path = self.config.get_distribution_path(relative_file_path)
if not self.config.path_is_reserved(distribution_path):
yield IncludedFile(
os.path.join(root, f),
"" if external else relative_file_path,
distribution_path,
)
else:
msg = f"Forced include not found: {source}"
raise FileNotFoundError(msg)
def recurse_explicit_files(self, inclusion_map: dict[str, str]) -> Iterable[IncludedFile]:
for source, target_path in inclusion_map.items():
external = not source.startswith(self.root)
if os.path.isfile(source):
distribution_path = self.config.get_distribution_path(target_path)
if not self.config.path_is_reserved(distribution_path):
yield IncludedFile(
source,
"" if external else os.path.relpath(source, self.root),
self.config.get_distribution_path(target_path),
)
elif os.path.isdir(source):
for root, dirs, files in safe_walk(source):
relative_directory = get_relative_path(root, source)
dirs[:] = sorted(d for d in dirs if d not in EXCLUDED_DIRECTORIES)
files.sort()
is_package = "__init__.py" in files
for f in files:
if f in EXCLUDED_FILES:
continue
relative_file_path = os.path.join(target_path, relative_directory, f)
distribution_path = self.config.get_distribution_path(relative_file_path)
if self.config.path_is_reserved(distribution_path):
continue
if self.config.include_path(relative_file_path, explicit=True, is_package=is_package):
yield IncludedFile(
os.path.join(root, f), "" if external else relative_file_path, distribution_path
)
@property
def root(self) -> str:
"""
The root of the project tree.
"""
return self.__root
@property
def plugin_manager(self) -> PluginManagerBound:
if self.__plugin_manager is None:
from hatchling.plugin.manager import PluginManager
self.__plugin_manager = PluginManager()
return self.__plugin_manager
@property
def metadata(self) -> ProjectMetadata:
if self.__metadata is None:
from hatchling.metadata.core import ProjectMetadata
self.__metadata = ProjectMetadata(self.root, self.plugin_manager, self.__raw_config)
return self.__metadata
@property
def app(self) -> Application:
"""
An instance of [Application](../utilities.md#hatchling.bridge.app.Application).
"""
if self.__app is None:
from hatchling.bridge.app import Application
self.__app = cast(Application, Application().get_safe_application())
return self.__app
@property
def raw_config(self) -> dict[str, Any]:
if self.__raw_config is None:
self.__raw_config = self.metadata.config
return self.__raw_config
@property
def project_config(self) -> dict[str, Any]:
if self.__project_config is None:
self.__project_config = self.metadata.core.config
return self.__project_config
@property
def hatch_config(self) -> dict[str, Any]:
if self.__hatch_config is None:
self.__hatch_config = self.metadata.hatch.config
return self.__hatch_config
@property
def config(self) -> BuilderConfigBound:
"""
An instance of [BuilderConfig](../utilities.md#hatchling.builders.config.BuilderConfig).
"""
if self.__config is None:
self.__config = self.get_config_class()(
self, self.root, self.PLUGIN_NAME, self.build_config, self.target_config
)
return self.__config
@property
def build_config(self) -> dict[str, Any]:
"""
```toml config-example
[tool.hatch.build]
```
"""
if self.__build_config is None:
self.__build_config = self.metadata.hatch.build_config
return self.__build_config
@property
def target_config(self) -> dict[str, Any]:
"""
```toml config-example
[tool.hatch.build.targets.<PLUGIN_NAME>]
```
"""
if self.__target_config is None:
target_config: dict[str, Any] = self.metadata.hatch.build_targets.get(self.PLUGIN_NAME, {})
if not isinstance(target_config, dict):
message = f"Field `tool.hatch.build.targets.{self.PLUGIN_NAME}` must be a table"
raise TypeError(message)
self.__target_config = target_config
return self.__target_config
@property
def project_id(self) -> str:
if self.__project_id is None:
self.__project_id = f"{self.normalize_file_name_component(self.metadata.core.name)}-{self.metadata.version}"
return self.__project_id
def get_build_hooks(self, directory: str) -> dict[str, BuildHookInterface]:
configured_build_hooks = {}
for hook_name, config in self.config.hook_config.items():
build_hook = self.plugin_manager.build_hook.get(hook_name)
if build_hook is None:
from hatchling.plugin.exceptions import UnknownPluginError
message = f"Unknown build hook: {hook_name}"
raise UnknownPluginError(message)
configured_build_hooks[hook_name] = build_hook(
self.root, config, self.config, self.metadata, directory, self.PLUGIN_NAME, self.app
)
return configured_build_hooks
@abstractmethod
def get_version_api(self) -> dict[str, Callable]:
"""
A mapping of `str` versions to a callable that is used for building.
Each callable must have the following signature:
```python
def ...(build_dir: str, build_data: dict) -> str:
```
The return value must be the absolute path to the built artifact.
"""
def get_default_versions(self) -> list[str]:
"""
A list of versions to build when users do not specify any, defaulting to all versions.
"""
return list(self.get_version_api())
def get_default_build_data(self) -> dict[str, Any]: # noqa: PLR6301
"""
A mapping that can be modified by [build hooks](../build-hook/reference.md) to influence the behavior of builds.
"""
return {}
def set_build_data_defaults(self, build_data: dict[str, Any]) -> None: # noqa: PLR6301
build_data.setdefault("artifacts", [])
build_data.setdefault("force_include", {})
def clean(self, directory: str, versions: list[str]) -> None:
"""
Called before builds if the `-c`/`--clean` flag was passed to the
[`build`](../../cli/reference.md#hatch-build) command.
"""
@classmethod
def get_config_class(cls) -> type[BuilderConfig]:
"""
Must return a subclass of [BuilderConfig](../utilities.md#hatchling.builders.config.BuilderConfig).
"""
return BuilderConfig
@staticmethod
def normalize_file_name_component(file_name: str) -> str:
"""
https://peps.python.org/pep-0427/#escaping-and-unicode
"""
return re.sub(r"[^\w\d.]+", "_", file_name, flags=re.UNICODE)
|
BuilderInterface
|
python
|
pytorch__pytorch
|
test/dynamo/cpython/3_13/test_itertools.py
|
{
"start": 111089,
"end": 113883
}
|
class ____(__TestCase):
def test_sf_793826(self):
# Fix Armin Rigo's successful efforts to wreak havoc
def mutatingtuple(tuple1, f, tuple2):
# this builds a tuple t which is a copy of tuple1,
# then calls f(t), then mutates t to be equal to tuple2
# (needs len(tuple1) == len(tuple2)).
def g(value, first=[1]):
if first:
del first[:]
f(next(z))
return value
items = list(tuple2)
items[1:1] = list(tuple1)
gen = map(g, items)
z = zip(*[gen]*len(tuple1))
next(z)
def f(t):
global T
T = t
first[:] = list(T)
first = []
mutatingtuple((1,2,3), f, (4,5,6))
second = list(T)
self.assertEqual(first, second)
def test_sf_950057(self):
# Make sure that chain() and cycle() catch exceptions immediately
# rather than when shifting between input sources
def gen1():
hist.append(0)
yield 1
hist.append(1)
raise AssertionError
hist.append(2)
def gen2(x):
hist.append(3)
yield 2
hist.append(4)
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(False)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(True)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, cycle(gen1()))
self.assertEqual(hist, [0,1])
@support.skip_if_pgo_task
@support.requires_resource('cpu')
@slowTest
def test_long_chain_of_empty_iterables(self):
# Make sure itertools.chain doesn't run into recursion limits when
# dealing with long chains of empty iterables. Even with a high
# number this would probably only fail in Py_DEBUG mode.
it = chain.from_iterable(() for unused in range(10000000))
with self.assertRaises(StopIteration):
next(it)
def test_issue30347_1(self):
def f(n):
if n == 5:
list(b)
return n != 6
for (k, b) in groupby(range(10), f):
list(b) # shouldn't crash
def test_issue30347_2(self):
class K:
def __init__(self, v):
pass
def __eq__(self, other):
nonlocal i
i += 1
if i == 1:
next(g, None)
return True
i = 0
g = next(groupby(range(10), K))[1]
for j in range(2):
next(g, None) # shouldn't crash
|
RegressionTests
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_vendored/dateutil/tz/tz.py
|
{
"start": 28028,
"end": 33860
}
|
class ____(tzrangebase):
"""
The ``tzrange`` object is a time zone specified by a set of offsets and
abbreviations, equivalent to the way the ``TZ`` variable can be specified
in POSIX-like systems, but using Python delta objects to specify DST
start, end and offsets.
:param stdabbr:
The abbreviation for standard time (e.g. ``'EST'``).
:param stdoffset:
An integer or :class:`datetime.timedelta` object or equivalent
specifying the base offset from UTC.
If unspecified, +00:00 is used.
:param dstabbr:
The abbreviation for DST / "Summer" time (e.g. ``'EDT'``).
If specified, with no other DST information, DST is assumed to occur
and the default behavior or ``dstoffset``, ``start`` and ``end`` is
used. If unspecified and no other DST information is specified, it
is assumed that this zone has no DST.
If this is unspecified and other DST information is *is* specified,
DST occurs in the zone but the time zone abbreviation is left
unchanged.
:param dstoffset:
A an integer or :class:`datetime.timedelta` object or equivalent
specifying the UTC offset during DST. If unspecified and any other DST
information is specified, it is assumed to be the STD offset +1 hour.
:param start:
A :class:`relativedelta.relativedelta` object or equivalent specifying
the time and time of year that daylight savings time starts. To
specify, for example, that DST starts at 2AM on the 2nd Sunday in
March, pass:
``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))``
If unspecified and any other DST information is specified, the default
value is 2 AM on the first Sunday in April.
:param end:
A :class:`relativedelta.relativedelta` object or equivalent
representing the time and time of year that daylight savings time
ends, with the same specification method as in ``start``. One note is
that this should point to the first time in the *standard* zone, so if
a transition occurs at 2AM in the DST zone and the clocks are set back
1 hour to 1AM, set the ``hours`` parameter to +1.
**Examples:**
.. testsetup:: tzrange
from dateutil.tz import tzrange, tzstr
.. doctest:: tzrange
>>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT")
True
>>> from dateutil.relativedelta import *
>>> range1 = tzrange("EST", -18000, "EDT")
>>> range2 = tzrange("EST", -18000, "EDT", -14400,
... relativedelta(hours=+2, month=4, day=1,
... weekday=SU(+1)),
... relativedelta(hours=+1, month=10, day=31,
... weekday=SU(-1)))
>>> tzstr('EST5EDT') == range1 == range2
True
"""
def __init__(self, stdabbr, stdoffset=None,
dstabbr=None, dstoffset=None,
start=None, end=None):
global relativedelta
# CHANGED IN VENDORED VERSION
from .. import relativedelta
self._std_abbr = stdabbr
self._dst_abbr = dstabbr
try:
stdoffset = stdoffset.total_seconds()
except (TypeError, AttributeError):
pass
try:
dstoffset = dstoffset.total_seconds()
except (TypeError, AttributeError):
pass
if stdoffset is not None:
self._std_offset = datetime.timedelta(seconds=stdoffset)
else:
self._std_offset = ZERO
if dstoffset is not None:
self._dst_offset = datetime.timedelta(seconds=dstoffset)
elif dstabbr and stdoffset is not None:
self._dst_offset = self._std_offset + datetime.timedelta(hours=+1)
else:
self._dst_offset = ZERO
if dstabbr and start is None:
self._start_delta = relativedelta.relativedelta(
hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
else:
self._start_delta = start
if dstabbr and end is None:
self._end_delta = relativedelta.relativedelta(
hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
else:
self._end_delta = end
self._dst_base_offset_ = self._dst_offset - self._std_offset
self.hasdst = bool(self._start_delta)
def transitions(self, year):
"""
For a given year, get the DST on and off transition times, expressed
always on the standard time side. For zones with no transitions, this
function returns ``None``.
:param year:
The year whose transitions you would like to query.
:return:
Returns a :class:`tuple` of :class:`datetime.datetime` objects,
``(dston, dstoff)`` for zones with an annual DST transition, or
``None`` for fixed offset zones.
"""
if not self.hasdst:
return None
base_year = datetime.datetime(year, 1, 1)
start = base_year + self._start_delta
end = base_year + self._end_delta
return (start, end)
def __eq__(self, other):
if not isinstance(other, tzrange):
return NotImplemented
return (self._std_abbr == other._std_abbr and
self._dst_abbr == other._dst_abbr and
self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset and
self._start_delta == other._start_delta and
self._end_delta == other._end_delta)
@property
def _dst_base_offset(self):
return self._dst_base_offset_
@six.add_metaclass(_TzStrFactory)
|
tzrange
|
python
|
Textualize__textual
|
tests/test_path.py
|
{
"start": 281,
"end": 361
}
|
class ____(App[None]):
CSS_PATH = Path("/tmp/test.tcss")
|
AbsolutePathObjectApp
|
python
|
django__django
|
tests/admin_changelist/admin.py
|
{
"start": 3818,
"end": 4046
}
|
class ____(admin.ModelAdmin):
list_display_links = None
list_display = ["name"]
list_editable = ["name"]
actions_on_bottom = True
site.register(Parent, NoListDisplayLinksParentAdmin)
|
NoListDisplayLinksParentAdmin
|
python
|
wandb__wandb
|
wandb/_pydantic/base.py
|
{
"start": 5116,
"end": 5727
}
|
class ____(GQLBase, ABC):
# For GraphQL inputs, exclude null values when preparing JSON-able request
# data.
__DUMP_DEFAULTS: ClassVar[Dict[str, Any]] = dict(exclude_none=True)
@override
def model_dump(self, *, mode: str = "json", **kwargs: Any) -> dict[str, Any]:
kwargs = {**self.__DUMP_DEFAULTS, **kwargs}
return super().model_dump(mode=mode, **kwargs)
@override
def model_dump_json(self, *, indent: int | None = None, **kwargs: Any) -> str:
kwargs = {**self.__DUMP_DEFAULTS, **kwargs}
return super().model_dump_json(indent=indent, **kwargs)
|
GQLInput
|
python
|
jazzband__django-pipeline
|
tests/tests/test_compiler.py
|
{
"start": 5997,
"end": 6831
}
|
class ____(TestCase):
def setUp(self):
default_collector.collect()
self.compiler = Compiler()
def test_compile(self):
with self.assertRaises(CompilerError) as cm:
self.compiler.compile([_("pipeline/js/dummy.coffee")])
e = cm.exception
self.assertEqual(
e.command,
[
"this-exists-nowhere-as-a-command-and-should-fail",
"pipeline/js/dummy.coffee",
"pipeline/js/dummy.junk",
],
)
self.assertEqual(e.error_output, "")
def tearDown(self):
default_collector.clear()
@skipIf(sys.platform.startswith("win"), "requires posix platform")
@pipeline_settings(COMPILERS=["tests.tests.test_compiler.FailingCompiler"])
|
InvalidCompilerTest
|
python
|
getsentry__sentry
|
src/sentry/integrations/services/integration/model.py
|
{
"start": 662,
"end": 1699
}
|
class ____(RpcModel):
id: int
provider: str
external_id: str
name: str
metadata: dict[str, Any] = Field(repr=False)
status: int
def __hash__(self) -> int:
return hash(self.id)
def get_status_display(self) -> str:
for status_id, display in ObjectStatus.as_choices():
if status_id == self.status:
return display
return "disabled"
def get_provider(self) -> IntegrationProvider:
from sentry.integrations.models.utils import get_provider
return get_provider(instance=self)
def get_installation(self, organization_id: int, **kwargs: Any) -> IntegrationInstallation:
from sentry.integrations.models.utils import get_installation
return get_installation(instance=self, organization_id=organization_id, **kwargs)
def has_feature(self, feature: IntegrationFeatures) -> bool:
from sentry.integrations.models.utils import has_feature
return has_feature(instance=self, feature=feature)
|
RpcIntegration
|
python
|
RaRe-Technologies__gensim
|
gensim/test/test_word2vec.py
|
{
"start": 57536,
"end": 58536
}
|
class ____(unittest.TestCase):
def test_word2vec_stand_alone_script(self):
"""Does Word2Vec script launch standalone?"""
cmd = [
sys.executable, '-m', 'gensim.scripts.word2vec_standalone',
'-train', datapath('testcorpus.txt'),
'-output', 'vec.txt', '-size', '200', '-sample', '1e-4',
'-binary', '0', '-iter', '3', '-min_count', '1',
]
output = check_output(args=cmd, stderr=subprocess.PIPE)
self.assertEqual(output, b'')
if not hasattr(TestWord2VecModel, 'assertLess'):
# workaround for python 2.6
def assertLess(self, a, b, msg=None):
self.assertTrue(a < b, msg="%s is not less than %s" % (a, b))
setattr(TestWord2VecModel, 'assertLess', assertLess)
if __name__ == '__main__':
logging.basicConfig(
format='%(asctime)s : %(threadName)s : %(levelname)s : %(message)s',
level=logging.DEBUG
)
unittest.main(module='gensim.test.test_word2vec')
|
TestWord2VecScripts
|
python
|
tensorflow__tensorflow
|
tensorflow/python/module/module_test.py
|
{
"start": 13042,
"end": 13161
}
|
class ____(module.Module, metaclass=abc.ABCMeta):
@abc.abstractmethod
def __call__(self, x):
pass
|
AbstractModule
|
python
|
jazzband__django-redis
|
django_redis/cache.py
|
{
"start": 1115,
"end": 8190
}
|
class ____(BaseCache):
def __init__(self, server: str, params: dict[str, Any]) -> None:
super().__init__(params)
self._server = server
self._params = params
self._default_scan_itersize = getattr(
settings,
"DJANGO_REDIS_SCAN_ITERSIZE",
10,
)
options = params.get("OPTIONS", {})
self._client_cls = options.get(
"CLIENT_CLASS",
"django_redis.client.DefaultClient",
)
self._client_cls = import_string(self._client_cls)
self._client = None
self._ignore_exceptions = options.get(
"IGNORE_EXCEPTIONS",
getattr(settings, "DJANGO_REDIS_IGNORE_EXCEPTIONS", False),
)
self._log_ignored_exceptions = getattr(
settings,
"DJANGO_REDIS_LOG_IGNORED_EXCEPTIONS",
False,
)
self.logger = (
logging.getLogger(getattr(settings, "DJANGO_REDIS_LOGGER", __name__))
if self._log_ignored_exceptions
else None
)
@property
def client(self):
"""
Lazy client connection property.
"""
if self._client is None:
self._client = self._client_cls(self._server, self._params, self)
return self._client
@omit_exception
def set(self, *args, **kwargs):
return self.client.set(*args, **kwargs)
@omit_exception
def incr_version(self, *args, **kwargs):
return self.client.incr_version(*args, **kwargs)
@omit_exception
def add(self, *args, **kwargs):
return self.client.add(*args, **kwargs)
def get(self, key, default=None, version=None, client=None):
value = self._get(key, default, version, client)
if value is CONNECTION_INTERRUPTED:
value = default
return value
@omit_exception(return_value=CONNECTION_INTERRUPTED)
def _get(self, key, default, version, client):
return self.client.get(key, default=default, version=version, client=client)
@omit_exception
def delete(self, *args, **kwargs):
"""returns a boolean instead of int since django version 3.1"""
result = self.client.delete(*args, **kwargs)
return bool(result) if DJANGO_VERSION >= (3, 1, 0) else result
@omit_exception
def delete_pattern(self, *args, **kwargs):
kwargs.setdefault("itersize", self._default_scan_itersize)
return self.client.delete_pattern(*args, **kwargs)
@omit_exception
def delete_many(self, *args, **kwargs):
return self.client.delete_many(*args, **kwargs)
@omit_exception
def clear(self):
return self.client.clear()
@omit_exception(return_value={})
def get_many(self, *args, **kwargs):
return self.client.get_many(*args, **kwargs)
@omit_exception
def set_many(self, *args, **kwargs):
return self.client.set_many(*args, **kwargs)
@omit_exception
def incr(self, *args, **kwargs):
return self.client.incr(*args, **kwargs)
@omit_exception
def decr(self, *args, **kwargs):
return self.client.decr(*args, **kwargs)
@omit_exception
def has_key(self, *args, **kwargs):
return self.client.has_key(*args, **kwargs)
@omit_exception
def keys(self, *args, **kwargs):
return self.client.keys(*args, **kwargs)
@omit_exception
def iter_keys(self, *args, **kwargs):
return self.client.iter_keys(*args, **kwargs)
@omit_exception
def ttl(self, *args, **kwargs):
return self.client.ttl(*args, **kwargs)
@omit_exception
def pttl(self, *args, **kwargs):
return self.client.pttl(*args, **kwargs)
@omit_exception
def persist(self, *args, **kwargs):
return self.client.persist(*args, **kwargs)
@omit_exception
def expire(self, *args, **kwargs):
return self.client.expire(*args, **kwargs)
@omit_exception
def expire_at(self, *args, **kwargs):
return self.client.expire_at(*args, **kwargs)
@omit_exception
def pexpire(self, *args, **kwargs):
return self.client.pexpire(*args, **kwargs)
@omit_exception
def pexpire_at(self, *args, **kwargs):
return self.client.pexpire_at(*args, **kwargs)
@omit_exception
def lock(self, *args, **kwargs):
return self.client.lock(*args, **kwargs)
@omit_exception
def close(self, **kwargs):
self.client.close(**kwargs)
@omit_exception
def touch(self, *args, **kwargs):
return self.client.touch(*args, **kwargs)
@omit_exception
def sadd(self, *args, **kwargs):
return self.client.sadd(*args, **kwargs)
@omit_exception
def scard(self, *args, **kwargs):
return self.client.scard(*args, **kwargs)
@omit_exception
def sdiff(self, *args, **kwargs):
return self.client.sdiff(*args, **kwargs)
@omit_exception
def sdiffstore(self, *args, **kwargs):
return self.client.sdiffstore(*args, **kwargs)
@omit_exception
def sinter(self, *args, **kwargs):
return self.client.sinter(*args, **kwargs)
@omit_exception
def sinterstore(self, *args, **kwargs):
return self.client.sinterstore(*args, **kwargs)
@omit_exception
def sismember(self, *args, **kwargs):
return self.client.sismember(*args, **kwargs)
@omit_exception
def smembers(self, *args, **kwargs):
return self.client.smembers(*args, **kwargs)
@omit_exception
def smove(self, *args, **kwargs):
return self.client.smove(*args, **kwargs)
@omit_exception
def spop(self, *args, **kwargs):
return self.client.spop(*args, **kwargs)
@omit_exception
def srandmember(self, *args, **kwargs):
return self.client.srandmember(*args, **kwargs)
@omit_exception
def srem(self, *args, **kwargs):
return self.client.srem(*args, **kwargs)
@omit_exception
def sscan(self, *args, **kwargs):
return self.client.sscan(*args, **kwargs)
@omit_exception
def sscan_iter(self, *args, **kwargs):
return self.client.sscan_iter(*args, **kwargs)
@omit_exception
def smismember(self, *args, **kwargs):
return self.client.smismember(*args, **kwargs)
@omit_exception
def sunion(self, *args, **kwargs):
return self.client.sunion(*args, **kwargs)
@omit_exception
def sunionstore(self, *args, **kwargs):
return self.client.sunionstore(*args, **kwargs)
@omit_exception
def hset(self, *args, **kwargs):
return self.client.hset(*args, **kwargs)
@omit_exception
def hdel(self, *args, **kwargs):
return self.client.hdel(*args, **kwargs)
@omit_exception
def hlen(self, *args, **kwargs):
return self.client.hlen(*args, **kwargs)
@omit_exception
def hkeys(self, *args, **kwargs):
return self.client.hkeys(*args, **kwargs)
@omit_exception
def hexists(self, *args, **kwargs):
return self.client.hexists(*args, **kwargs)
|
RedisCache
|
python
|
pypa__virtualenv
|
src/virtualenv/create/via_global_ref/builtin/graalpy/__init__.py
|
{
"start": 345,
"end": 2330
}
|
class ____(ViaGlobalRefVirtualenvBuiltin, ABC):
@classmethod
def can_describe(cls, interpreter):
return interpreter.implementation == "GraalVM" and super().can_describe(interpreter)
@classmethod
def exe_stem(cls):
return "graalpy"
@classmethod
def exe_names(cls, interpreter):
return {
cls.exe_stem(),
"python",
f"python{interpreter.version_info.major}",
f"python{interpreter.version_info.major}.{interpreter.version_info.minor}",
}
@classmethod
def _executables(cls, interpreter):
host = Path(interpreter.system_executable)
targets = sorted(f"{name}{cls.suffix}" for name in cls.exe_names(interpreter))
yield host, targets, RefMust.NA, RefWhen.ANY
@classmethod
def sources(cls, interpreter):
yield from super().sources(interpreter)
python_dir = Path(interpreter.system_executable).resolve().parent
if python_dir.name in {"bin", "Scripts"}:
python_dir = python_dir.parent
native_lib = cls._native_lib(python_dir / "lib", interpreter.platform)
if native_lib.exists():
yield PathRefToDest(native_lib, dest=lambda self, s: self.bin_dir.parent / "lib" / s.name)
for jvm_dir_name in ("jvm", "jvmlibs", "modules"):
jvm_dir = python_dir / jvm_dir_name
if jvm_dir.exists():
yield PathRefToDest(jvm_dir, dest=lambda self, s: self.bin_dir.parent / s.name)
@classmethod
def _shared_libs(cls, python_dir):
raise NotImplementedError
def set_pyenv_cfg(self):
super().set_pyenv_cfg()
# GraalPy 24.0 and older had home without the bin
version = self.interpreter.version_info
if version.major == 3 and version.minor <= 10: # noqa: PLR2004
home = Path(self.pyenv_cfg["home"])
if home.name == "bin":
self.pyenv_cfg["home"] = str(home.parent)
|
GraalPy
|
python
|
huggingface__transformers
|
tests/models/biogpt/test_tokenization_biogpt.py
|
{
"start": 919,
"end": 4277
}
|
class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "microsoft/biogpt"
tokenizer_class = BioGptTokenizer
test_rust_tokenizer = False
@classmethod
def setUpClass(cls):
super().setUpClass()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
cls.vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
cls.merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
cls.merges_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
def get_input_output_texts(self, tokenizer):
input_text = "lower newer"
output_text = "lower newer"
return input_text, output_text
def test_full_tokenizer(self):
"""Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt"""
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdir:
vocab_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["vocab_file"])
merges_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["merges_file"])
shutil.copy(self.vocab_file, vocab_file)
shutil.copy(self.merges_file, merges_file)
with open(vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, "w") as fp:
fp.write("\n".join(merges))
tokenizer = BioGptTokenizer(vocab_file, merges_file)
text = "lower"
bpe_tokens = ["low", "er</w>"]
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + ["<unk>"]
input_bpe_tokens = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
def test_sequence_builders(self):
tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
self.assertTrue(encoded_sentence == [2] + text)
self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
|
BioGptTokenizationTest
|
python
|
getsentry__sentry
|
src/sentry/integrations/pagerduty/actions/notification.py
|
{
"start": 1000,
"end": 6673
}
|
class ____(IntegrationEventAction):
id = "sentry.integrations.pagerduty.notify_action.PagerDutyNotifyServiceAction"
label = "Send a notification to PagerDuty account {account} and service {service} with {severity} severity"
prompt = "Send a PagerDuty notification"
provider = IntegrationProviderSlug.PAGERDUTY.value
integration_key = "account"
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.form_fields = {
"account": {
"type": "choice",
"choices": [(i.id, i.name) for i in self.get_integrations()],
},
"service": {"type": "choice", "choices": self.get_services()},
"severity": {
"type": "choice",
"choices": [
("default", "default"),
("critical", "critical"),
("warning", "warning"),
("error", "error"),
("info", "info"),
],
},
}
def _get_service(self) -> PagerDutyService | None:
oi = self.get_organization_integration()
if not oi:
return None
for pds in oi.config.get("pagerduty_services", []):
if str(pds["id"]) == str(self.get_option("service")):
return pds
return None
def after(
self, event: GroupEvent, notification_uuid: str | None = None
) -> Generator[CallbackFuture]:
integration = self.get_integration()
log_context = {
"organization_id": self.project.organization_id,
"integration_id": self.get_option("account"),
"service": self.get_option("service"),
}
if not integration:
# integration removed but rule still exists
logger.info("pagerduty.org_integration_missing", extra=log_context)
return
service = self._get_service()
if not service:
logger.info("pagerduty.service_missing", extra=log_context)
return
severity = cast(
PagerdutySeverity, self.get_option("severity", default=PAGERDUTY_DEFAULT_SEVERITY)
)
def send_notification(event: GroupEvent, futures: Sequence[RuleFuture]) -> None:
installation = integration.get_installation(self.project.organization_id)
try:
client = installation.get_keyring_client(self.get_option("service"))
except Exception as e:
sentry_sdk.capture_exception(e)
return
data = build_pagerduty_event_payload(
routing_key=client.integration_key,
event=event,
notification_uuid=notification_uuid,
severity=severity,
)
rules: list[Rule] = [f.rule for f in futures]
rule = rules[0] if rules else None
if rule and rule.label:
data["payload"]["summary"] = truncatechars(
f"[{rule.label}]: {data['payload']['summary']}", PAGERDUTY_SUMMARY_MAX_LENGTH
)
try:
resp = client.send_trigger(data=data)
except ApiError as e:
self.logger.info(
"rule.fail.pagerduty_trigger",
extra={
"error": str(e),
"service_name": service["service_name"],
"service_id": service["id"],
"project_id": event.project_id,
"event_id": event.event_id,
},
)
raise
self.record_notification_sent(event, str(service["id"]), rule, notification_uuid)
# TODO(meredith): Maybe have a generic success log statements for
# first-party integrations similar to plugin `notification.dispatched`
self.logger.info(
"rule.success.pagerduty_trigger",
extra={
"status_code": resp.status_code,
"project_id": event.project_id,
"event_id": event.event_id,
"service_name": service["service_name"],
"service_id": service["id"],
},
)
key = f"pagerduty:{integration.id}:{service['id']}:{severity}"
yield self.future(send_notification, key=key)
def get_services(self) -> Sequence[tuple[int, str]]:
from sentry.integrations.services.integration import integration_service
organization_integrations = integration_service.get_organization_integrations(
providers=[self.provider], organization_id=self.project.organization_id
)
return [
(v["id"], v["service_name"])
for oi in organization_integrations
for v in oi.config.get("pagerduty_services", [])
]
def render_label(self) -> str:
s = self._get_service()
if s:
service_name = s["service_name"]
else:
service_name = "[removed]"
severity = self.get_option("severity", default=PAGERDUTY_DEFAULT_SEVERITY)
return self.label.format(
account=self.get_integration_name(),
service=service_name,
severity=severity,
)
def get_form_instance(self) -> PagerDutyNotifyServiceForm:
return PagerDutyNotifyServiceForm(
self.data,
integrations=self.get_integrations(),
services=self.get_services(),
)
|
PagerDutyNotifyServiceAction
|
python
|
ray-project__ray
|
python/ray/autoscaler/v2/tests/test_utils.py
|
{
"start": 700,
"end": 21978
}
|
class ____:
@staticmethod
def test_combine_requests_with_affinity():
AFFINITY = ResourceRequestUtil.PlacementConstraintType.AFFINITY
ANTI_AFFINITY = ResourceRequestUtil.PlacementConstraintType.ANTI_AFFINITY
rqs = [
ResourceRequestUtil.make({"CPU": 1}, [(AFFINITY, "1", "1")]), # 1
ResourceRequestUtil.make({"CPU": 2}, [(AFFINITY, "1", "1")]), # 1
ResourceRequestUtil.make({"CPU": 1}, [(AFFINITY, "2", "2")]), # 2
ResourceRequestUtil.make({"CPU": 1}, [(AFFINITY, "2", "2")]), # 2
ResourceRequestUtil.make({"CPU": 1}, [(ANTI_AFFINITY, "2", "2")]), # 3
ResourceRequestUtil.make({"CPU": 1}, [(ANTI_AFFINITY, "2", "2")]), # 4
ResourceRequestUtil.make({"CPU": 1}), # 5
]
rq_result = ResourceRequestUtil.combine_requests_with_affinity(rqs)
assert len(rq_result) == 5
actual = ResourceRequestUtil.to_dict_list(rq_result)
expected = [
ResourceRequestUtil.to_dict(
ResourceRequestUtil.make(
{"CPU": 3}, # Combined
[
(AFFINITY, "1", "1"),
],
)
),
ResourceRequestUtil.to_dict(
ResourceRequestUtil.make(
{"CPU": 2}, # Combined
[
(AFFINITY, "2", "2"),
],
)
),
ResourceRequestUtil.to_dict(
ResourceRequestUtil.make(
{"CPU": 1},
[(ANTI_AFFINITY, "2", "2")],
)
),
ResourceRequestUtil.to_dict(
ResourceRequestUtil.make(
{"CPU": 1},
[(ANTI_AFFINITY, "2", "2")],
)
),
ResourceRequestUtil.to_dict(
ResourceRequestUtil.make(
{"CPU": 1},
)
),
]
actual_str_serialized = [str(x) for x in actual]
expected_str_serialized = [str(x) for x in expected]
assert sorted(actual_str_serialized) == sorted(expected_str_serialized)
def test_cluster_status_parser_cluster_resource_state():
test_data = {
"cluster_resource_state": {
"node_states": [
{
"node_id": b"1" * 4,
"instance_id": "instance1",
"ray_node_type_name": "head_node",
"available_resources": {
"CPU": 0.5,
"GPU": 2.0,
},
"total_resources": {
"CPU": 1,
"GPU": 2.0,
},
"status": "RUNNING",
"node_ip_address": "10.10.10.10",
"instance_type_name": "m5.large",
},
{
"node_id": b"2" * 4,
"instance_id": "instance2",
"ray_node_type_name": "worker_node",
"available_resources": {},
"total_resources": {
"CPU": 1,
"GPU": 2.0,
},
"status": "DEAD",
"node_ip_address": "22.22.22.22",
"instance_type_name": "m5.large",
},
{
"node_id": b"3" * 4,
"instance_id": "instance3",
"ray_node_type_name": "worker_node",
"available_resources": {
"CPU": 1.0,
"GPU": 2.0,
},
"total_resources": {
"CPU": 1,
"GPU": 2.0,
},
"idle_duration_ms": 100,
"status": "IDLE",
"node_ip_address": "22.22.22.22",
"instance_type_name": "m5.large",
},
],
"pending_gang_resource_requests": [
{
"requests": [
{
"resources_bundle": {"CPU": 1, "GPU": 1},
"placement_constraints": [
{
"anti_affinity": {
"label_name": "_PG_1x1x",
"label_value": "",
}
}
],
},
],
"details": "1x1x:STRICT_SPREAD|PENDING",
},
{
"requests": [
{
"resources_bundle": {"GPU": 2},
"placement_constraints": [
{
"affinity": {
"label_name": "_PG_2x2x",
"label_value": "",
}
}
],
},
],
"details": "2x2x:STRICT_PACK|PENDING",
},
],
"pending_resource_requests": [
{
"request": {
"resources_bundle": {"CPU": 1, "GPU": 1},
"placement_constraints": [],
},
"count": 1,
},
],
"cluster_resource_constraints": [
{
"resource_requests": [
{
"request": {
"resources_bundle": {"GPU": 2, "CPU": 100},
"placement_constraints": [],
},
"count": 1,
},
]
}
],
"cluster_resource_state_version": 10,
},
"autoscaling_state": {},
}
reply = _gen_cluster_status_reply(test_data)
stats = Stats(gcs_request_time_s=0.1)
cluster_status = ClusterStatusParser.from_get_cluster_status_reply(reply, stats)
# Assert on health nodes
assert len(cluster_status.idle_nodes) + len(cluster_status.active_nodes) == 2
assert cluster_status.active_nodes[0].instance_id == "instance1"
assert cluster_status.active_nodes[0].ray_node_type_name == "head_node"
cluster_status.active_nodes[0].resource_usage.usage.sort(
key=lambda x: x.resource_name
)
assert cluster_status.active_nodes[0].resource_usage == NodeUsage(
usage=[
ResourceUsage(resource_name="CPU", total=1.0, used=0.5),
ResourceUsage(resource_name="GPU", total=2.0, used=0.0),
],
idle_time_ms=0,
)
assert cluster_status.idle_nodes[0].instance_id == "instance3"
assert cluster_status.idle_nodes[0].ray_node_type_name == "worker_node"
cluster_status.idle_nodes[0].resource_usage.usage.sort(
key=lambda x: x.resource_name
)
assert cluster_status.idle_nodes[0].resource_usage == NodeUsage(
usage=[
ResourceUsage(resource_name="CPU", total=1.0, used=0.0),
ResourceUsage(resource_name="GPU", total=2.0, used=0.0),
],
idle_time_ms=100,
)
# Assert on dead nodes
assert len(cluster_status.failed_nodes) == 1
assert cluster_status.failed_nodes[0].instance_id == "instance2"
assert cluster_status.failed_nodes[0].ray_node_type_name == "worker_node"
assert cluster_status.failed_nodes[0].resource_usage is None
# Assert on resource demands from tasks
assert len(cluster_status.resource_demands.ray_task_actor_demand) == 1
assert cluster_status.resource_demands.ray_task_actor_demand[
0
].bundles_by_count == [
ResourceRequestByCount(
bundle={"CPU": 1, "GPU": 1},
count=1,
)
]
# Assert on resource demands from placement groups
assert len(cluster_status.resource_demands.placement_group_demand) == 2
assert sorted(
cluster_status.resource_demands.placement_group_demand, key=lambda x: x.pg_id
) == [
PlacementGroupResourceDemand(
bundles_by_count=[
ResourceRequestByCount(bundle={"CPU": 1, "GPU": 1}, count=1)
],
strategy="STRICT_SPREAD",
pg_id="1x1x",
state="PENDING",
details="1x1x:STRICT_SPREAD|PENDING",
),
PlacementGroupResourceDemand(
bundles_by_count=[ResourceRequestByCount(bundle={"GPU": 2}, count=1)],
strategy="STRICT_PACK",
pg_id="2x2x",
state="PENDING",
details="2x2x:STRICT_PACK|PENDING",
),
]
# Assert on resource constraints
assert len(cluster_status.resource_demands.cluster_constraint_demand) == 1
assert cluster_status.resource_demands.cluster_constraint_demand[
0
].bundles_by_count == [
ResourceRequestByCount(bundle={"GPU": 2, "CPU": 100}, count=1)
]
# Assert on the cluster_resource_usage
assert sorted(
cluster_status.cluster_resource_usage, key=lambda x: x.resource_name
) == [
ResourceUsage(resource_name="CPU", total=2.0, used=0.5),
ResourceUsage(resource_name="GPU", total=4.0, used=0.0),
]
# Assert on the node stats
assert cluster_status.stats.cluster_resource_state_version == "10"
assert cluster_status.stats.gcs_request_time_s == 0.1
def test_cluster_status_parser_autoscaler_state():
test_data = {
"cluster_resource_state": {},
"autoscaling_state": {
"pending_instance_requests": [
{
"instance_type_name": "m5.large",
"ray_node_type_name": "head_node",
"count": 1,
"request_ts": 29999,
},
{
"instance_type_name": "m5.large",
"ray_node_type_name": "worker_node",
"count": 2,
"request_ts": 19999,
},
],
"pending_instances": [
{
"instance_type_name": "m5.large",
"ray_node_type_name": "head_node",
"instance_id": "instance1",
"ip_address": "10.10.10.10",
"details": "Starting Ray",
},
],
"failed_instance_requests": [
{
"instance_type_name": "m5.large",
"ray_node_type_name": "worker_node",
"count": 2,
"reason": "Insufficient capacity",
"start_ts": 10000,
"failed_ts": 20000,
}
],
"autoscaler_state_version": 10,
},
}
reply = _gen_cluster_status_reply(test_data)
stats = Stats(gcs_request_time_s=0.1)
cluster_status = ClusterStatusParser.from_get_cluster_status_reply(reply, stats)
# Assert on the pending requests
assert len(cluster_status.pending_launches) == 2
assert cluster_status.pending_launches[0].instance_type_name == "m5.large"
assert cluster_status.pending_launches[0].ray_node_type_name == "head_node"
assert cluster_status.pending_launches[0].count == 1
assert cluster_status.pending_launches[0].request_ts_s == 29999
assert cluster_status.pending_launches[1].instance_type_name == "m5.large"
assert cluster_status.pending_launches[1].ray_node_type_name == "worker_node"
assert cluster_status.pending_launches[1].count == 2
assert cluster_status.pending_launches[1].request_ts_s == 19999
# Assert on the failed requests
assert len(cluster_status.failed_launches) == 1
assert cluster_status.failed_launches[0].instance_type_name == "m5.large"
assert cluster_status.failed_launches[0].ray_node_type_name == "worker_node"
assert cluster_status.failed_launches[0].count == 2
assert cluster_status.failed_launches[0].details == "Insufficient capacity"
assert cluster_status.failed_launches[0].request_ts_s == 10000
assert cluster_status.failed_launches[0].failed_ts_s == 20000
# Assert on the pending nodes
assert len(cluster_status.pending_nodes) == 1
assert cluster_status.pending_nodes[0].instance_type_name == "m5.large"
assert cluster_status.pending_nodes[0].ray_node_type_name == "head_node"
assert cluster_status.pending_nodes[0].instance_id == "instance1"
assert cluster_status.pending_nodes[0].ip_address == "10.10.10.10"
assert cluster_status.pending_nodes[0].details == "Starting Ray"
# Assert on stats
assert cluster_status.stats.autoscaler_version == "10"
assert cluster_status.stats.gcs_request_time_s == 0.1
def test_cluster_status_formatter():
state = ClusterStatus(
idle_nodes=[
NodeInfo(
instance_id="instance1",
instance_type_name="m5.large",
ray_node_type_name="head_node",
ip_address="127.0.0.1",
node_status="RUNNING",
node_id="fffffffffffffffffffffffffffffffffffffffffffffffffff00001",
resource_usage=NodeUsage(
usage=[
ResourceUsage(resource_name="CPU", total=1.0, used=0.5),
ResourceUsage(resource_name="GPU", total=2.0, used=0.0),
ResourceUsage(
resource_name="object_store_memory",
total=10282.0,
used=5555.0,
),
],
idle_time_ms=0,
),
),
NodeInfo(
instance_id="instance2",
instance_type_name="m5.large",
ray_node_type_name="worker_node",
ip_address="127.0.0.2",
node_status="RUNNING",
node_id="fffffffffffffffffffffffffffffffffffffffffffffffffff00002",
resource_usage=NodeUsage(
usage=[
ResourceUsage(resource_name="CPU", total=1.0, used=0),
ResourceUsage(resource_name="GPU", total=2.0, used=0),
],
idle_time_ms=0,
),
),
NodeInfo(
instance_id="instance3",
instance_type_name="m5.large",
ray_node_type_name="worker_node",
ip_address="127.0.0.2",
node_status="RUNNING",
node_id="fffffffffffffffffffffffffffffffffffffffffffffffffff00003",
resource_usage=NodeUsage(
usage=[
ResourceUsage(resource_name="CPU", total=1.0, used=0.0),
],
idle_time_ms=0,
),
),
],
pending_launches=[
LaunchRequest(
instance_type_name="m5.large",
count=2,
ray_node_type_name="worker_node",
state=LaunchRequest.Status.PENDING,
request_ts_s=10000,
),
LaunchRequest(
instance_type_name="g5n.large",
count=1,
ray_node_type_name="worker_node_gpu",
state=LaunchRequest.Status.PENDING,
request_ts_s=20000,
),
],
failed_launches=[
LaunchRequest(
instance_type_name="m5.large",
count=2,
ray_node_type_name="worker_node",
state=LaunchRequest.Status.FAILED,
details="Insufficient capacity",
request_ts_s=10000,
failed_ts_s=20000,
),
],
pending_nodes=[
NodeInfo(
instance_id="instance4",
instance_type_name="m5.large",
ray_node_type_name="worker_node",
ip_address="127.0.0.3",
details="Starting Ray",
),
],
failed_nodes=[
NodeInfo(
instance_id="instance5",
instance_type_name="m5.large",
ray_node_type_name="worker_node",
ip_address="127.0.0.5",
node_status="DEAD",
),
],
cluster_resource_usage=[
ResourceUsage(resource_name="CPU", total=3.0, used=0.5),
ResourceUsage(resource_name="GPU", total=4.0, used=0.0),
ResourceUsage(
resource_name="object_store_memory", total=10282.0, used=5555.0
),
],
resource_demands=ResourceDemandSummary(
placement_group_demand=[
PlacementGroupResourceDemand(
pg_id="1x1x",
strategy="STRICT_SPREAD",
state="PENDING",
details="1x1x:STRICT_SPREAD|PENDING",
bundles_by_count=[
ResourceRequestByCount(bundle={"CPU": 1, "GPU": 1}, count=1)
],
),
PlacementGroupResourceDemand(
pg_id="2x2x",
strategy="STRICT_PACK",
state="PENDING",
details="2x2x:STRICT_PACK|PENDING",
bundles_by_count=[
ResourceRequestByCount(bundle={"GPU": 2}, count=1)
],
),
PlacementGroupResourceDemand(
pg_id="3x3x",
strategy="STRICT_PACK",
state="PENDING",
details="3x3x:STRICT_PACK|PENDING",
bundles_by_count=[
ResourceRequestByCount(bundle={"GPU": 2}, count=1)
],
),
],
ray_task_actor_demand=[
RayTaskActorDemand(
bundles_by_count=[
ResourceRequestByCount(bundle={"CPU": 1, "GPU": 1}, count=1)
]
),
RayTaskActorDemand(
bundles_by_count=[
ResourceRequestByCount(bundle={"CPU": 1, "GPU": 1}, count=10)
]
),
],
cluster_constraint_demand=[
ClusterConstraintDemand(
bundles_by_count=[
ResourceRequestByCount(bundle={"GPU": 2, "CPU": 100}, count=2)
]
),
],
),
stats=Stats(
gcs_request_time_s=0.1,
none_terminated_node_request_time_s=0.2,
autoscaler_iteration_time_s=0.3,
autoscaler_version="10",
cluster_resource_state_version="20",
request_ts_s=775303535,
),
)
actual = ClusterStatusFormatter.format(state, verbose=True)
expected = """======== Autoscaler status: 1994-07-27 10:05:35 ========
GCS request time: 0.100000s
Node Provider non_terminated_nodes time: 0.200000s
Autoscaler iteration time: 0.300000s
Node status
--------------------------------------------------------
Active:
(no active nodes)
Idle:
1 head_node
2 worker_node
Pending:
worker_node, 1 launching
worker_node_gpu, 1 launching
instance4: worker_node, starting ray
Recent failures:
worker_node: LaunchFailed (latest_attempt: 02:46:40) - Insufficient capacity
worker_node: NodeTerminated (instance_id: instance5)
Resources
--------------------------------------------------------
Total Usage:
0.5/3.0 CPU
0.0/4.0 GPU
5.42KiB/10.04KiB object_store_memory
From request_resources:
{'GPU': 2, 'CPU': 100}: 2 from request_resources()
Pending Demands:
{'CPU': 1, 'GPU': 1}: 11+ pending tasks/actors
{'CPU': 1, 'GPU': 1} * 1 (STRICT_SPREAD): 1+ pending placement groups
{'GPU': 2} * 1 (STRICT_PACK): 2+ pending placement groups
Node: instance1 (head_node)
Id: fffffffffffffffffffffffffffffffffffffffffffffffffff00001
Usage:
0.5/1.0 CPU
0.0/2.0 GPU
5.42KiB/10.04KiB object_store_memory
Activity:
(no activity)
Node: instance2 (worker_node)
Id: fffffffffffffffffffffffffffffffffffffffffffffffffff00002
Usage:
0/1.0 CPU
0/2.0 GPU
Activity:
(no activity)
Node: instance3 (worker_node)
Id: fffffffffffffffffffffffffffffffffffffffffffffffffff00003
Usage:
0.0/1.0 CPU
Activity:
(no activity)"""
assert actual == expected
if __name__ == "__main__":
if os.environ.get("PARALLEL_CI"):
sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
else:
sys.exit(pytest.main(["-sv", __file__]))
|
TestResourceRequestUtil
|
python
|
pytorch__pytorch
|
test/dynamo/cpython/3_13/test_itertools.py
|
{
"start": 96907,
"end": 99773
}
|
class ____(__TestCase):
def makecycle(self, iterator, container):
container.append(iterator)
next(iterator)
del container, iterator
def test_accumulate(self):
a = []
self.makecycle(accumulate([1,2,a,3]), a)
def test_batched(self):
a = []
self.makecycle(batched([1,2,a,3], 2), a)
def test_chain(self):
a = []
self.makecycle(chain(a), a)
def test_chain_from_iterable(self):
a = []
self.makecycle(chain.from_iterable([a]), a)
def test_combinations(self):
a = []
self.makecycle(combinations([1,2,a,3], 3), a)
def test_combinations_with_replacement(self):
a = []
self.makecycle(combinations_with_replacement([1,2,a,3], 3), a)
def test_compress(self):
a = []
self.makecycle(compress('ABCDEF', [1,0,1,0,1,0]), a)
def test_count(self):
a = []
Int = type('Int', (int,), dict(x=a))
self.makecycle(count(Int(0), Int(1)), a)
def test_cycle(self):
a = []
self.makecycle(cycle([a]*2), a)
def test_dropwhile(self):
a = []
self.makecycle(dropwhile(bool, [0, a, a]), a)
def test_groupby(self):
a = []
self.makecycle(groupby([a]*2, lambda x:x), a)
def test_issue2246(self):
# Issue 2246 -- the _grouper iterator was not included in GC
n = 10
keyfunc = lambda x: x
for i, j in groupby(range(n), key=keyfunc):
keyfunc.__dict__.setdefault('x',[]).append(j)
def test_filter(self):
a = []
self.makecycle(filter(lambda x:True, [a]*2), a)
def test_filterfalse(self):
a = []
self.makecycle(filterfalse(lambda x:False, a), a)
def test_zip(self):
a = []
self.makecycle(zip([a]*2, [a]*3), a)
def test_zip_longest(self):
a = []
self.makecycle(zip_longest([a]*2, [a]*3), a)
b = [a, None]
self.makecycle(zip_longest([a]*2, [a]*3, fillvalue=b), a)
def test_map(self):
a = []
self.makecycle(map(lambda x:x, [a]*2), a)
def test_islice(self):
a = []
self.makecycle(islice([a]*2, None), a)
def test_pairwise(self):
a = []
self.makecycle(pairwise([a]*5), a)
def test_permutations(self):
a = []
self.makecycle(permutations([1,2,a,3], 3), a)
def test_product(self):
a = []
self.makecycle(product([1,2,a,3], repeat=3), a)
def test_repeat(self):
a = []
self.makecycle(repeat(a), a)
def test_starmap(self):
a = []
self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a)
def test_takewhile(self):
a = []
self.makecycle(takewhile(bool, [1, 0, a, a]), a)
def R(seqn):
'Regular generator'
for i in seqn:
yield i
|
TestGC
|
python
|
realpython__materials
|
python-iterators-iterables/sequence_iter.py
|
{
"start": 0,
"end": 380
}
|
class ____:
def __init__(self, sequence):
self._sequence = sequence
self._index = 0
def __iter__(self):
return self
def __next__(self):
if self._index < len(self._sequence):
item = self._sequence[self._index]
self._index += 1
return item
else:
raise StopIteration
|
SequenceIterator
|
python
|
pypa__hatch
|
src/hatch/python/core.py
|
{
"start": 1285,
"end": 4020
}
|
class ____:
def __init__(self, directory: Path) -> None:
self.__directory = directory
@property
def directory(self) -> Path:
return self.__directory
def get_installed(self) -> dict[str, InstalledDistribution]:
if not self.directory.is_dir():
return {}
import json
installed_distributions: list[InstalledDistribution] = []
for path in self.directory.iterdir():
if not (path.name in DISTRIBUTIONS and path.is_dir()):
continue
metadata_file = path / InstalledDistribution.metadata_filename()
if not metadata_file.is_file():
continue
metadata = json.loads(metadata_file.read_text())
distribution = get_distribution(path.name, source=metadata.get("source", ""))
if not (path / distribution.python_path).is_file():
continue
installed_distributions.append(InstalledDistribution(path, distribution, metadata))
installed_distributions.sort(key=lambda d: ORDERED_DISTRIBUTIONS.index(d.name))
return {dist.name: dist for dist in installed_distributions}
def install(self, identifier: str) -> InstalledDistribution:
import json
from hatch.utils.network import download_file
dist = get_distribution(identifier)
path = self.directory / identifier
self.directory.ensure_dir_exists()
with temp_directory() as temp_dir:
archive_path = temp_dir / dist.archive_name
unpack_path = temp_dir / identifier
download_file(archive_path, dist.source, follow_redirects=True)
dist.unpack(archive_path, unpack_path)
backup_path = path.with_suffix(".bak")
if backup_path.is_dir():
backup_path.wait_for_dir_removed()
if path.is_dir():
path.replace(backup_path)
try:
unpack_path.replace(path)
except OSError:
import shutil
try:
shutil.move(str(unpack_path), str(path))
except OSError:
path.wait_for_dir_removed()
if backup_path.is_dir():
backup_path.replace(path)
raise
metadata = {"source": dist.source, "python_path": dist.python_path}
metadata_file = path / InstalledDistribution.metadata_filename()
metadata_file.write_text(json.dumps(metadata, indent=2))
return InstalledDistribution(path, dist, metadata)
@staticmethod
def remove(dist: InstalledDistribution) -> None:
dist.path.wait_for_dir_removed()
|
PythonManager
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1beta1_device_counter_consumption.py
|
{
"start": 383,
"end": 5403
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'counter_set': 'str',
'counters': 'dict(str, V1beta1Counter)'
}
attribute_map = {
'counter_set': 'counterSet',
'counters': 'counters'
}
def __init__(self, counter_set=None, counters=None, local_vars_configuration=None): # noqa: E501
"""V1beta1DeviceCounterConsumption - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._counter_set = None
self._counters = None
self.discriminator = None
self.counter_set = counter_set
self.counters = counters
@property
def counter_set(self):
"""Gets the counter_set of this V1beta1DeviceCounterConsumption. # noqa: E501
CounterSet is the name of the set from which the counters defined will be consumed. # noqa: E501
:return: The counter_set of this V1beta1DeviceCounterConsumption. # noqa: E501
:rtype: str
"""
return self._counter_set
@counter_set.setter
def counter_set(self, counter_set):
"""Sets the counter_set of this V1beta1DeviceCounterConsumption.
CounterSet is the name of the set from which the counters defined will be consumed. # noqa: E501
:param counter_set: The counter_set of this V1beta1DeviceCounterConsumption. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and counter_set is None: # noqa: E501
raise ValueError("Invalid value for `counter_set`, must not be `None`") # noqa: E501
self._counter_set = counter_set
@property
def counters(self):
"""Gets the counters of this V1beta1DeviceCounterConsumption. # noqa: E501
Counters defines the counters that will be consumed by the device. The maximum number counters in a device is 32. In addition, the maximum number of all counters in all devices is 1024 (for example, 64 devices with 16 counters each). # noqa: E501
:return: The counters of this V1beta1DeviceCounterConsumption. # noqa: E501
:rtype: dict(str, V1beta1Counter)
"""
return self._counters
@counters.setter
def counters(self, counters):
"""Sets the counters of this V1beta1DeviceCounterConsumption.
Counters defines the counters that will be consumed by the device. The maximum number counters in a device is 32. In addition, the maximum number of all counters in all devices is 1024 (for example, 64 devices with 16 counters each). # noqa: E501
:param counters: The counters of this V1beta1DeviceCounterConsumption. # noqa: E501
:type: dict(str, V1beta1Counter)
"""
if self.local_vars_configuration.client_side_validation and counters is None: # noqa: E501
raise ValueError("Invalid value for `counters`, must not be `None`") # noqa: E501
self._counters = counters
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1DeviceCounterConsumption):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1DeviceCounterConsumption):
return True
return self.to_dict() != other.to_dict()
|
V1beta1DeviceCounterConsumption
|
python
|
getsentry__sentry
|
tests/sentry/users/api/endpoints/test_user_permission_details.py
|
{
"start": 339,
"end": 1628
}
|
class ____(APITestCase):
endpoint = "sentry-api-0-user-permission-details"
def setUp(self) -> None:
super().setUp()
self.superuser = self.create_user(is_superuser=True)
self.add_user_permission(self.superuser, "users.admin")
self.staff_user = self.create_user(is_staff=True)
self.add_user_permission(self.staff_user, "users.admin")
self.normal_user = self.create_user(is_superuser=False, is_staff=False)
# For each request method testcase, ensure regular users fail
def test_fails_without_superuser_or_staff(self) -> None:
self.login_as(self.normal_user)
response = self.get_response("me", "broadcasts.admin")
assert response.status_code == 403
# For each request method testcase, ensure superuser+staff without users.admin fail
def test_fails_without_users_admin_permission(self) -> None:
self.superuser_and_staff = self.create_user(is_superuser=True, is_staff=True)
self.login_as(self.superuser_and_staff, superuser=True, staff=True)
# We are active superuser and staff but lack the users.admin permission
response = self.get_response("me", "broadcasts.admin", status_code=403)
assert response.status_code == 403
@control_silo_test
|
UserDetailsTest
|
python
|
pennersr__django-allauth
|
tests/apps/socialaccount/providers/vimeo_oauth2/tests.py
|
{
"start": 251,
"end": 1189
}
|
class ____(OAuth2TestsMixin, TestCase):
provider_id = VimeoOAuth2Provider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""{
"uri": "/users/12345",
"name": "AllAuth",
"link": "https://vimeo.com/user12345",
"created_time": "2012-06-04T00:02:16+00:00",
"pictures": {
"uri": null,
"active": false,
"type": "default",
"sizes": [{
"width": 30,
"height": 30,
"link": "https://i.vimeocdn.com/portrait/defaults-blue_30x30.png"
}],
"resource_key": "1234567890abcdef"
},
"resource_key": "1234567890abcdef",
"account": "pro"
}""",
) # noqa
def get_expected_to_str(self):
return "AllAuth"
|
VimeoOAuth2Tests
|
python
|
mozilla__bleach
|
bleach/html5lib_shim.py
|
{
"start": 5418,
"end": 7212
}
|
class ____:
"""Wraps an HTMLInputStream to remember characters since last <
This wraps existing HTMLInputStream classes to keep track of the stream
since the last < which marked an open tag state.
"""
def __init__(self, inner_stream):
self._inner_stream = inner_stream
self.reset = self._inner_stream.reset
self.position = self._inner_stream.position
self._buffer = []
@property
def errors(self):
return self._inner_stream.errors
@property
def charEncoding(self):
return self._inner_stream.charEncoding
@property
def changeEncoding(self):
return self._inner_stream.changeEncoding
def char(self):
c = self._inner_stream.char()
# char() can return None if EOF, so ignore that
if c:
self._buffer.append(c)
return c
def charsUntil(self, characters, opposite=False):
chars = self._inner_stream.charsUntil(characters, opposite=opposite)
self._buffer.extend(list(chars))
return chars
def unget(self, char):
if self._buffer:
self._buffer.pop(-1)
return self._inner_stream.unget(char)
def get_tag(self):
"""Returns the stream history since last '<'
Since the buffer starts at the last '<' as as seen by tagOpenState(),
we know that everything from that point to when this method is called
is the "tag" that is being tokenized.
"""
return "".join(self._buffer)
def start_tag(self):
"""Resets stream history to just '<'
This gets called by tagOpenState() which marks a '<' that denotes an
open tag. Any time we see that, we reset the buffer.
"""
self._buffer = ["<"]
|
InputStreamWithMemory
|
python
|
getsentry__sentry
|
src/sentry/release_health/metrics_sessions_v2.py
|
{
"start": 1996,
"end": 2345
}
|
class ____(Enum):
ABNORMAL = "abnormal"
CRASHED = "crashed"
ERRORED = "errored"
HEALTHY = "healthy"
UNHANDLED = "unhandled"
ALL_STATUSES = frozenset(iter(SessionStatus))
#: Used to filter results by session.status
StatusFilter = Optional[frozenset[SessionStatus]]
MAX_POSTGRES_LIMIT = 100
@dataclass(frozen=True)
|
SessionStatus
|
python
|
pallets__click
|
src/click/types.py
|
{
"start": 28757,
"end": 35230
}
|
class ____(ParamType):
"""The ``Path`` type is similar to the :class:`File` type, but
returns the filename instead of an open file. Various checks can be
enabled to validate the type of file and permissions.
:param exists: The file or directory needs to exist for the value to
be valid. If this is not set to ``True``, and the file does not
exist, then all further checks are silently skipped.
:param file_okay: Allow a file as a value.
:param dir_okay: Allow a directory as a value.
:param readable: if true, a readable check is performed.
:param writable: if true, a writable check is performed.
:param executable: if true, an executable check is performed.
:param resolve_path: Make the value absolute and resolve any
symlinks. A ``~`` is not expanded, as this is supposed to be
done by the shell only.
:param allow_dash: Allow a single dash as a value, which indicates
a standard stream (but does not open it). Use
:func:`~click.open_file` to handle opening this value.
:param path_type: Convert the incoming path value to this type. If
``None``, keep Python's default, which is ``str``. Useful to
convert to :class:`pathlib.Path`.
.. versionchanged:: 8.1
Added the ``executable`` parameter.
.. versionchanged:: 8.0
Allow passing ``path_type=pathlib.Path``.
.. versionchanged:: 6.0
Added the ``allow_dash`` parameter.
"""
envvar_list_splitter: t.ClassVar[str] = os.path.pathsep
def __init__(
self,
exists: bool = False,
file_okay: bool = True,
dir_okay: bool = True,
writable: bool = False,
readable: bool = True,
resolve_path: bool = False,
allow_dash: bool = False,
path_type: type[t.Any] | None = None,
executable: bool = False,
):
self.exists = exists
self.file_okay = file_okay
self.dir_okay = dir_okay
self.readable = readable
self.writable = writable
self.executable = executable
self.resolve_path = resolve_path
self.allow_dash = allow_dash
self.type = path_type
if self.file_okay and not self.dir_okay:
self.name: str = _("file")
elif self.dir_okay and not self.file_okay:
self.name = _("directory")
else:
self.name = _("path")
def to_info_dict(self) -> dict[str, t.Any]:
info_dict = super().to_info_dict()
info_dict.update(
exists=self.exists,
file_okay=self.file_okay,
dir_okay=self.dir_okay,
writable=self.writable,
readable=self.readable,
allow_dash=self.allow_dash,
)
return info_dict
def coerce_path_result(
self, value: str | os.PathLike[str]
) -> str | bytes | os.PathLike[str]:
if self.type is not None and not isinstance(value, self.type):
if self.type is str:
return os.fsdecode(value)
elif self.type is bytes:
return os.fsencode(value)
else:
return t.cast("os.PathLike[str]", self.type(value))
return value
def convert(
self,
value: str | os.PathLike[str],
param: Parameter | None,
ctx: Context | None,
) -> str | bytes | os.PathLike[str]:
rv = value
is_dash = self.file_okay and self.allow_dash and rv in (b"-", "-")
if not is_dash:
if self.resolve_path:
rv = os.path.realpath(rv)
try:
st = os.stat(rv)
except OSError:
if not self.exists:
return self.coerce_path_result(rv)
self.fail(
_("{name} {filename!r} does not exist.").format(
name=self.name.title(), filename=format_filename(value)
),
param,
ctx,
)
if not self.file_okay and stat.S_ISREG(st.st_mode):
self.fail(
_("{name} {filename!r} is a file.").format(
name=self.name.title(), filename=format_filename(value)
),
param,
ctx,
)
if not self.dir_okay and stat.S_ISDIR(st.st_mode):
self.fail(
_("{name} {filename!r} is a directory.").format(
name=self.name.title(), filename=format_filename(value)
),
param,
ctx,
)
if self.readable and not os.access(rv, os.R_OK):
self.fail(
_("{name} {filename!r} is not readable.").format(
name=self.name.title(), filename=format_filename(value)
),
param,
ctx,
)
if self.writable and not os.access(rv, os.W_OK):
self.fail(
_("{name} {filename!r} is not writable.").format(
name=self.name.title(), filename=format_filename(value)
),
param,
ctx,
)
if self.executable and not os.access(value, os.X_OK):
self.fail(
_("{name} {filename!r} is not executable.").format(
name=self.name.title(), filename=format_filename(value)
),
param,
ctx,
)
return self.coerce_path_result(rv)
def shell_complete(
self, ctx: Context, param: Parameter, incomplete: str
) -> list[CompletionItem]:
"""Return a special completion marker that tells the completion
system to use the shell to provide path completions for only
directories or any paths.
:param ctx: Invocation context for this command.
:param param: The parameter that is requesting completion.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
from click.shell_completion import CompletionItem
type = "dir" if self.dir_okay and not self.file_okay else "file"
return [CompletionItem(incomplete, type=type)]
|
Path
|
python
|
sphinx-doc__sphinx
|
tests/test_util/typing_test_data.py
|
{
"start": 1907,
"end": 2044
}
|
class ____:
def __init__(self, parent: Optional['Node']) -> None:
pass
def children(self) -> List['Node']:
pass
|
Node
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/base.py
|
{
"start": 84559,
"end": 86934
}
|
class ____(util.OrderedSet["ColumnClause[Any]"]):
def contains_column(self, col: ColumnClause[Any]) -> bool:
return col in self
def extend(self, cols: Iterable[Any]) -> None:
for col in cols:
self.add(col)
def __eq__(self, other):
l = []
for c in other:
for local in self:
if c.shares_lineage(local):
l.append(c == local)
return elements.and_(*l)
def __hash__(self) -> int: # type: ignore[override]
return hash(tuple(x for x in self))
def _entity_namespace(
entity: Union[_HasEntityNamespace, ExternallyTraversible],
) -> _EntityNamespace:
"""Return the nearest .entity_namespace for the given entity.
If not immediately available, does an iterate to find a sub-element
that has one, if any.
"""
try:
return cast(_HasEntityNamespace, entity).entity_namespace
except AttributeError:
for elem in visitors.iterate(cast(ExternallyTraversible, entity)):
if _is_has_entity_namespace(elem):
return elem.entity_namespace
else:
raise
@overload
def _entity_namespace_key(
entity: Union[_HasEntityNamespace, ExternallyTraversible],
key: str,
) -> SQLCoreOperations[Any]: ...
@overload
def _entity_namespace_key(
entity: Union[_HasEntityNamespace, ExternallyTraversible],
key: str,
default: _NoArg,
) -> SQLCoreOperations[Any]: ...
@overload
def _entity_namespace_key(
entity: Union[_HasEntityNamespace, ExternallyTraversible],
key: str,
default: _T,
) -> Union[SQLCoreOperations[Any], _T]: ...
def _entity_namespace_key(
entity: Union[_HasEntityNamespace, ExternallyTraversible],
key: str,
default: Union[SQLCoreOperations[Any], _T, _NoArg] = NO_ARG,
) -> Union[SQLCoreOperations[Any], _T]:
"""Return an entry from an entity_namespace.
Raises :class:`_exc.InvalidRequestError` rather than attribute error
on not found.
"""
try:
ns = _entity_namespace(entity)
if default is not NO_ARG:
return getattr(ns, key, default)
else:
return getattr(ns, key) # type: ignore
except AttributeError as err:
raise exc.InvalidRequestError(
'Entity namespace for "%s" has no property "%s"' % (entity, key)
) from err
|
ColumnSet
|
python
|
streamlit__streamlit
|
lib/streamlit/connections/base_connection.py
|
{
"start": 897,
"end": 6842
}
|
class ____(ABC, Generic[RawConnectionT]):
"""The abstract base class that all Streamlit Connections must inherit from.
This base class provides connection authors with a standardized way to hook into the
``st.connection()`` factory function: connection authors are required to provide an
implementation for the abstract method ``_connect`` in their subclasses.
Additionally, it also provides a few methods/properties designed to make
implementation of connections more convenient. See the docstrings for each of the
methods of this class for more information
.. note::
While providing an implementation of ``_connect`` is technically all that's
required to define a valid connection, connections should also provide the user
with context-specific ways of interacting with the underlying connection object.
For example, the first-party SQLConnection provides a ``query()`` method for
reads and a ``session`` property for more complex operations.
"""
def __init__(self, connection_name: str, **kwargs: Any) -> None:
"""Create a BaseConnection.
This constructor is called by the connection factory machinery when a user
script calls ``st.connection()``.
Subclasses of BaseConnection that want to overwrite this method should take care
to also call the base class' implementation.
Parameters
----------
connection_name : str
The name of this connection. This corresponds to the
``[connections.<connection_name>]`` config section in ``st.secrets``.
kwargs : dict
Any other kwargs to pass to this connection class' ``_connect`` method.
Returns
-------
None
"""
self._connection_name = connection_name
self._kwargs = kwargs
self._config_section_hash = calc_md5(json.dumps(self._secrets.to_dict()))
secrets_singleton.file_change_listener.connect(self._on_secrets_changed)
self._raw_instance: RawConnectionT | None = self._connect(**kwargs)
def __del__(self) -> None:
secrets_singleton.file_change_listener.disconnect(self._on_secrets_changed)
def __getattribute__(self, name: str) -> Any:
try:
return object.__getattribute__(self, name)
except AttributeError:
if hasattr(self._instance, name):
raise AttributeError(
f"`{name}` doesn't exist here, but you can call `._instance.{name}` instead"
)
raise
# Methods with default implementations that we don't expect subclasses to want or
# need to overwrite.
def _on_secrets_changed(self, _: str) -> None:
"""Reset the raw connection object when this connection's secrets change.
We don't expect either user scripts or connection authors to have to use or
overwrite this method.
"""
new_hash = calc_md5(json.dumps(self._secrets.to_dict()))
# Only reset the connection if the secrets file section specific to this
# connection has changed.
if new_hash != self._config_section_hash:
self._config_section_hash = new_hash
self.reset()
@property
def _secrets(self) -> AttrDict:
"""Get the secrets for this connection from the corresponding st.secrets section.
We expect this property to be used primarily by connection authors when they
are implementing their class' ``_connect`` method. User scripts should, for the
most part, have no reason to use this property.
"""
connections_section: AttrDict | None = None
if secrets_singleton.load_if_toml_exists():
connections_section = secrets_singleton.get("connections")
if connections_section is None or type(connections_section) is not AttrDict:
return AttrDict({})
return cast(
"AttrDict", connections_section.get(self._connection_name, AttrDict({}))
)
def reset(self) -> None:
"""Reset this connection so that it gets reinitialized the next time it's used.
This method can be useful when a connection has become stale, an auth token has
expired, or in similar scenarios where a broken connection might be fixed by
reinitializing it. Note that some connection methods may already use ``reset()``
in their error handling code.
Returns
-------
None
Example
-------
>>> import streamlit as st
>>>
>>> conn = st.connection("my_conn")
>>>
>>> # Reset the connection before using it if it isn't healthy
>>> # Note: is_healthy() isn't a real method and is just shown for example here.
>>> if not conn.is_healthy():
... conn.reset()
>>>
>>> # Do stuff with conn...
"""
self._raw_instance = None
@property
def _instance(self) -> RawConnectionT:
"""Get an instance of the underlying connection, creating a new one if needed."""
if self._raw_instance is None:
self._raw_instance = self._connect(**self._kwargs)
return self._raw_instance
# Abstract fields/methods that subclasses of BaseConnection must implement
@abstractmethod
def _connect(self, **kwargs: Any) -> RawConnectionT:
"""Create an instance of an underlying connection object.
This abstract method is the one method that we require subclasses of
BaseConnection to provide an implementation for. It is called when first
creating a connection and when reconnecting after a connection is reset.
Parameters
----------
kwargs : dict
Returns
-------
RawConnectionT
The underlying connection object.
"""
raise NotImplementedError
|
BaseConnection
|
python
|
astropy__astropy
|
astropy/utils/masked/tests/test_masked.py
|
{
"start": 20172,
"end": 20263
}
|
class ____(TestMaskedArrayCopyFilled, LongitudeSetup):
pass
|
TestMaskedLongitudeCopyFilled
|
python
|
google__jax
|
jax/_src/mesh.py
|
{
"start": 3813,
"end": 4930
}
|
class ____(enum.Enum):
Auto = enum.auto()
Explicit = enum.auto()
Manual = enum.auto()
def __repr__(self):
return self.name
def _normalize_axis_types(axis_names, axis_types, name):
axis_types = ((AxisType.Auto,) * len(axis_names)
if axis_types is None else axis_types)
if not isinstance(axis_types, tuple):
axis_types = (axis_types,)
if not all(isinstance(a, AxisType) for a in axis_types):
raise TypeError(
f"axis_types passed to {name} must be of type `jax.sharding.AxisType`."
f" Got {axis_types} of type {tuple(type(a) for a in axis_types)}")
if len(axis_names) != len(axis_types):
raise ValueError(
"Number of axis names should match the number of axis_types. Got"
f" axis_names={axis_names} and axis_types={axis_types}")
return axis_types
def all_axis_types_match(axis_types, ty: AxisType) -> bool:
if not axis_types:
return False
return all(t == ty for t in axis_types)
def any_axis_types_match(axis_types, ty: AxisType) -> bool:
if not axis_types:
return False
return any(t == ty for t in axis_types)
|
AxisType
|
python
|
huggingface__transformers
|
tests/models/granitemoehybrid/test_modeling_granitemoehybrid.py
|
{
"start": 2642,
"end": 15214
}
|
class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
model_tester_class = GraniteMoeHybridModelTester
all_model_classes = (
(
GraniteMoeHybridModel,
GraniteMoeHybridForCausalLM,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": GraniteMoeHybridModel,
"text-generation": GraniteMoeHybridForCausalLM,
}
if is_torch_available()
else {}
)
# Need to use `0.8` instead of `0.9` for `test_cpu_offload`
# This is because we are hitting edge cases with the causal_mask buffer
model_split_percents = [0.5, 0.7, 0.8]
def _check_caches_are_equal(
self, cache1: HybridMambaAttentionDynamicCache, cache2: HybridMambaAttentionDynamicCache
):
if not isinstance(cache1, HybridMambaAttentionDynamicCache) or not isinstance(
cache2, HybridMambaAttentionDynamicCache
):
raise ValueError("The wrong cache is being used!")
if not len(cache1) == len(cache2):
raise ValueError("Both caches do not have the same number of layers.")
num_layers = len(cache1)
for idx in range(num_layers):
torch.testing.assert_close(cache1.key_cache[idx], cache2.key_cache[idx])
torch.testing.assert_close(cache1.value_cache[idx], cache2.value_cache[idx])
torch.testing.assert_close(cache1.conv_states[idx], cache2.conv_states[idx])
torch.testing.assert_close(cache1.ssm_states[idx], cache2.ssm_states[idx])
def setUp(self):
self.model_tester = self.model_tester_class(self)
self.config_tester = ConfigTester(self, config_class=self.model_tester.config_class, hidden_size=64)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_causal_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_attention_outputs(self):
r"""
Overriding the test_attention_outputs test as the Bamba model outputs attention only for its attention layers
"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "seq_length", None)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
expected_num_attentions = self.model_tester.num_hidden_layers - len(self.model_tester.attn_layer_indices)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), expected_num_attentions)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), expected_num_attentions)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), expected_num_attentions)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_batching_equivalence(self):
# need to disable the tril input mask
orig = self.model_tester.use_input_mask
self.model_tester.use_input_mask = False
super().test_batching_equivalence()
self.model_tester.use_input_mask = orig
@pytest.mark.generate
def test_left_padding_compatibility(self):
# TODO: document why a random attention mask causes this test to fail, but a full mask doesn't
unpadded_custom_inputs = {"attention_mask": None}
super().test_left_padding_compatibility(unpadded_custom_inputs=unpadded_custom_inputs)
@unittest.skip(
"Bamba requires additionally specifying position_ids, seq_idx, and FlashAttentionKwargs for padding-free training."
)
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip(
"Bamba requires additionally specifying position_ids, seq_idx, and FlashAttentionKwargs for padding-free training."
)
def test_flash_attention_2_padding_matches_padding_free_with_position_ids_and_fa_kwargs(self):
pass
@require_flash_attn
@require_torch_gpu
@mark.flash_attn_test
@slow
@unittest.skip(
"NotImplementedError: seq_idx support requires fast path support. Please install mamba_ssm and causal_conv1d"
)
def test_flash_attention_2_padding_matches_padding_free_with_position_ids_seq_idx_and_fa_kwargs(self):
if not self.has_attentions:
self.skipTest(reason="Model architecture does not support attentions")
max_new_tokens = 30
for model_class in self.all_generative_model_classes:
if not model_class._supports_flash_attn:
self.skipTest(f"{model_class.__name__} does not support Flash Attention 2")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if 0 not in inputs_dict.get("attention_mask", []) or "attention_mask" not in inputs_dict:
self.skipTest("Model dummy inputs should contain padding in their attention mask")
dummy_input = inputs_dict[model_class.main_input_name]
if dummy_input.dtype in [torch.float32, torch.bfloat16]:
dummy_input = dummy_input.to(torch.float16)
# make sure that all models have enough positions for generation
if hasattr(config, "max_position_embeddings"):
config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1
model = model_class(config)
if "position_ids" not in inspect.signature(model.forward).parameters:
self.skipTest("Model does not support position_ids")
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
# ensure left padding, to adapt for some models
if 0 in inputs_dict["attention_mask"][:, -1]:
inputs_dict["attention_mask"] = inputs_dict["attention_mask"].flip(1)
dummy_attention_mask = inputs_dict["attention_mask"]
inputs_dict["input_ids"][~dummy_attention_mask.bool()] = config.get_text_config().pad_token_id
# Ensure inputs_dict also has labels in it, as their presence/absence can induce
# dtype conversions. This also lets us compare losses.
labels = inputs_dict["input_ids"].clone()
# Mask padding tokens
labels[~dummy_attention_mask.bool()] = -100
# Also need to mask the first non-trivial token to match the padding-free batch.
first_nonneg_idx = (labels >= 0).int().argmax(dim=1)
labels[torch.arange(labels.size(0), device=labels.device), first_nonneg_idx] = -100
inputs_dict["labels"] = labels
model = (
model_class.from_pretrained(
tmpdirname,
dtype=torch.float16,
attn_implementation="flash_attention_2",
)
.to(torch_device)
.eval()
)
# flatten
features = [
{"input_ids": i[a.bool()].tolist()}
for i, a in zip(inputs_dict["input_ids"], inputs_dict["attention_mask"])
]
# add position_ids + fa_kwargs + seq_idx
data_collator = DataCollatorWithFlattening(
return_tensors="pt", return_seq_idx=True, return_flash_attn_kwargs=True
)
batch = data_collator(features)
batch_accelerator = {k: t.to(torch_device) if torch.is_tensor(t) else t for k, t in batch.items()}
res_padded = model(**inputs_dict)
res_padfree = model(**batch_accelerator)
logits_padded = res_padded.logits[inputs_dict["attention_mask"].bool()]
logits_padfree = res_padfree.logits[0]
torch.testing.assert_close(logits_padded.argmax(-1), logits_padfree.argmax(-1), rtol=0, atol=0)
# acceptable numerical instability
tol = torch.finfo(torch.float16).eps
torch.testing.assert_close(logits_padded, logits_padfree, rtol=tol, atol=tol)
loss_padded = res_padded.loss
loss_padfree = res_padfree.loss
torch.testing.assert_close(loss_padded, loss_padfree)
def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
self.assertIsInstance(past_key_values, HybridMambaAttentionDynamicCache)
# (batch, kv heads, seq_length, head_dim)
num_heads = getattr(config, "num_key_value_heads", config.num_attention_heads)
head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
attention_shape = (batch_size, num_heads, seq_length, head_dim)
conv_shape = (
batch_size,
config.mamba_expand * config.hidden_size + 2 * config.mamba_n_groups * config.mamba_d_state,
config.mamba_d_conv,
)
ssm_shape = (batch_size, config.mamba_n_heads, config.mamba_d_head, config.mamba_d_state)
self.assertTrue(config.num_hidden_layers, len(past_key_values))
for idx in range(len(past_key_values)):
if config.layers_block_type[idx] == "mamba":
self.assertEqual(past_key_values.conv_states[idx].shape, conv_shape)
self.assertEqual(past_key_values.ssm_states[idx].shape, ssm_shape)
else:
self.assertEqual(past_key_values.key_cache[idx].shape, attention_shape)
self.assertEqual(past_key_values.value_cache[idx].shape, attention_shape)
def test_config_requires_mamba_or_attention_layers(self):
"""Ensure we can't create a config with disallowed layers."""
with pytest.raises(ValueError):
GraniteMoeHybridConfig(layer_types=["not allowed!"])
# TODO (@alex-jw-brooks) - update this once the model(s) are out
@unittest.skip(reason="GraniteMoeHybrid models are not yet released")
@require_torch_gpu
|
GraniteMoeHybridModelTest
|
python
|
langchain-ai__langchain
|
libs/core/tests/unit_tests/runnables/test_fallbacks.py
|
{
"start": 9986,
"end": 10884
}
|
class ____(BaseChatModel):
foo: int
@override
def _generate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> ChatResult:
"""Top Level call."""
return ChatResult(generations=[])
@override
def bind_tools(
self,
tools: Sequence[dict[str, Any] | type[BaseModel] | Callable | BaseTool],
**kwargs: Any,
) -> Runnable[LanguageModelInput, AIMessage]:
return self.bind(tools=tools)
@override
def with_structured_output(
self, schema: dict | type[BaseModel], **kwargs: Any
) -> Runnable[LanguageModelInput, dict | BaseModel]:
return RunnableLambda(lambda _: {"foo": self.foo})
@property
def _llm_type(self) -> str:
return "fake1"
|
FakeStructuredOutputModel
|
python
|
astropy__astropy
|
astropy/coordinates/tests/test_masked.py
|
{
"start": 16397,
"end": 16947
}
|
class ____(TestSkyCoordWithDifferentials):
@classmethod
def setup_class(cls):
super().setup_class()
# Overwrite SkyCoord using unmasked distance.
cls.mask_dis = False
cls.sc = SkyCoord(
ra=cls.ra,
dec=cls.dec,
distance=cls.dis,
pm_ra_cosdec=cls.mpm_ra_cosdec,
pm_dec=cls.mpm_dec,
radial_velocity=cls.mrv,
)
cls.mask = cls.mask_dis | cls.mask_pm_ra_cosdec | cls.mask_pm_dec | cls.mask_rv
|
TestSkyCoordWithOnlyDifferentialsMasked
|
python
|
has2k1__plotnine
|
tests/test_helpers.py
|
{
"start": 193,
"end": 1263
}
|
class ____:
data = pd.DataFrame(
{
"x": [0, 1, 2, 3, 4, 5, 6],
"y": [0, 1, 2, 3, 4, 5, 6],
"g": list("aabbbcc"),
}
)
def test_continuous_limits(self):
p = ggplot(self.data, aes("x", "y")) + geom_point()
limits = cast("tuple[float, float]", get_aesthetic_limits(p, "x"))
npt.assert_array_almost_equal(limits, [0, 6])
def test_discrete_limits(self):
p = ggplot(self.data, aes("g")) + geom_bar()
limits = cast("list[str]", get_aesthetic_limits(p, "x"))
assert limits == ["a", "b", "c"]
def test_facet_limits(self):
p = (
ggplot(self.data, aes("x", "y"))
+ geom_point()
+ facet_wrap("g", scales="free_x")
)
limits = cast(
"list[tuple[float, float]]", get_aesthetic_limits(p, "x")
)
npt.assert_array_almost_equal(limits[0], [0, 1])
npt.assert_array_almost_equal(limits[1], [2, 4])
npt.assert_array_almost_equal(limits[2], [5, 6])
|
TestGetAestheticLimits
|
python
|
pola-rs__polars
|
py-polars/src/polars/series/plotting.py
|
{
"start": 430,
"end": 6673
}
|
class ____:
"""Series.plot namespace."""
_accessor = "plot"
def __init__(self, s: Series) -> None:
name = s.name or "value"
self._df = s.to_frame(name)
self._series_name = name
def hist(
self,
/,
**kwargs: Unpack[EncodeKwds],
) -> alt.Chart:
"""
Draw histogram.
Polars does not implement plotting logic itself but instead defers to
`Altair <https://altair-viz.github.io/>`_.
`s.plot.hist(**kwargs)` is shorthand for
`alt.Chart(s.to_frame()).mark_bar(tooltip=True).encode(x=alt.X(f'{s.name}:Q', bin=True), y='count()', **kwargs).interactive()`,
and is provided for convenience - for full customisatibility, use a plotting
library directly.
.. versionchanged:: 1.6.0
In prior versions of Polars, HvPlot was the plotting backend. If you would
like to restore the previous plotting functionality, all you need to do
is add `import hvplot.polars` at the top of your script and replace
`df.plot` with `df.hvplot`.
Parameters
----------
**kwargs
Additional arguments and keyword arguments passed to Altair.
Examples
--------
>>> s = pl.Series("price", [1, 3, 3, 3, 5, 2, 6, 5, 5, 5, 7])
>>> s.plot.hist() # doctest: +SKIP
""" # noqa: W505
if self._series_name == "count()":
msg = "cannot use `plot.hist` when Series name is `'count()'`"
raise ValueError(msg)
encodings: Encodings = {
"x": alt.X(f"{self._series_name}:Q", bin=True),
"y": "count()",
}
return (
alt.Chart(self._df)
.mark_bar(tooltip=True)
.encode(**encodings, **kwargs)
.interactive()
)
def kde(
self,
/,
**kwargs: Unpack[EncodeKwds],
) -> alt.Chart:
"""
Draw kernel density estimate plot.
Polars does not implement plotting logic itself but instead defers to
`Altair <https://altair-viz.github.io/>`_.
`s.plot.kde(**kwargs)` is shorthand for
`alt.Chart(s.to_frame()).transform_density(s.name, as_=[s.name, 'density']).mark_area(tooltip=True).encode(x=s.name, y='density:Q', **kwargs).interactive()`,
and is provided for convenience - for full customisatibility, use a plotting
library directly.
.. versionchanged:: 1.6.0
In prior versions of Polars, HvPlot was the plotting backend. If you would
like to restore the previous plotting functionality, all you need to do
is add `import hvplot.polars` at the top of your script and replace
`df.plot` with `df.hvplot`.
Parameters
----------
**kwargs
Additional keyword arguments passed to Altair.
Examples
--------
>>> s = pl.Series("price", [1, 3, 3, 3, 5, 2, 6, 5, 5, 5, 7])
>>> s.plot.kde() # doctest: +SKIP
""" # noqa: W505
if self._series_name == "density":
msg = "cannot use `plot.kde` when Series name is `'density'`"
raise ValueError(msg)
encodings: Encodings = {"x": self._series_name, "y": "density:Q"}
return (
alt.Chart(self._df)
.transform_density(self._series_name, as_=[self._series_name, "density"])
.mark_area(tooltip=True)
.encode(**encodings, **kwargs)
.interactive()
)
def line(
self,
/,
**kwargs: Unpack[EncodeKwds],
) -> alt.Chart:
"""
Draw line plot.
Polars does not implement plotting logic itself but instead defers to
`Altair <https://altair-viz.github.io/>`_.
`s.plot.line(**kwargs)` is shorthand for
`alt.Chart(s.to_frame().with_row_index()).mark_line(tooltip=True).encode(x='index', y=s.name, **kwargs).interactive()`,
and is provided for convenience - for full customisatibility, use a plotting
library directly.
.. versionchanged:: 1.6.0
In prior versions of Polars, HvPlot was the plotting backend. If you would
like to restore the previous plotting functionality, all you need to do
is add `import hvplot.polars` at the top of your script and replace
`df.plot` with `df.hvplot`.
Parameters
----------
**kwargs
Additional keyword arguments passed to Altair.
Examples
--------
>>> s = pl.Series("price", [1, 3, 3, 3, 5, 2, 6, 5, 5, 5, 7])
>>> s.plot.line() # doctest: +SKIP
""" # noqa: W505
if self._series_name == "index":
msg = "cannot call `plot.line` when Series name is 'index'"
raise ValueError(msg)
encodings: Encodings = {"x": "index", "y": self._series_name}
return (
alt.Chart(self._df.with_row_index())
.mark_line(tooltip=True)
.encode(**encodings, **kwargs)
.interactive()
)
def __getattr__(self, attr: str) -> Callable[..., alt.Chart]:
if self._series_name == "index":
msg = f"Cannot call `plot.{attr}` when Series name is 'index'"
raise ValueError(msg)
if attr == "scatter":
# alias `scatter` to `point` because of how common it is
attr = "point"
method = getattr(alt.Chart(self._df.with_row_index()), f"mark_{attr}", None)
if method is None:
msg = f"Altair has no method 'mark_{attr}'"
raise AttributeError(msg)
encodings: Encodings = {"x": "index", "y": self._series_name}
accepts_tooltip_argument = "tooltip" in {
value.name for value in inspect.signature(method).parameters.values()
}
if accepts_tooltip_argument:
def func(**kwargs: EncodeKwds) -> alt.Chart:
return method(tooltip=True).encode(**encodings, **kwargs).interactive()
else:
def func(**kwargs: EncodeKwds) -> alt.Chart:
return method().encode(**encodings, **kwargs).interactive()
return func
|
SeriesPlot
|
python
|
pypa__warehouse
|
tests/unit/captcha/test_recaptcha.py
|
{
"start": 693,
"end": 7746
}
|
class ____:
@responses.activate
def test_verify_service_disabled(self):
responses.add(
responses.POST,
recaptcha.VERIFY_URL,
body="",
)
serv = recaptcha.Service.create_service(
context=None, request=pretend.stub(registry=pretend.stub(settings={}))
)
assert serv.verify_response("") is None
assert not responses.calls
@responses.activate
def test_verify_service_disabled_with_none(self):
responses.add(
responses.POST,
recaptcha.VERIFY_URL,
body="",
)
serv = recaptcha.Service.create_service(
context=None,
request=pretend.stub(
registry=pretend.stub(
settings={
"recaptcha.site_key": None,
"recaptcha.secret_key": None,
},
),
),
)
assert serv.verify_response("") is None
assert not responses.calls
@responses.activate
def test_remote_ip_payload(self, session_resetting_request):
responses.add(
responses.POST,
recaptcha.VERIFY_URL,
json={"success": True},
)
serv = recaptcha.Service.create_service(
context=None, request=session_resetting_request
)
serv.verify_response("meaningless", remote_ip="ip")
payload = dict(urllib.parse.parse_qsl(responses.calls[0].request.body))
assert payload["remoteip"] == "ip"
@responses.activate
def test_unexpected_data_error(self, session_resetting_request):
responses.add(
responses.POST,
recaptcha.VERIFY_URL,
body="something awful",
)
serv = recaptcha.Service.create_service(
context=None, request=session_resetting_request
)
with pytest.raises(recaptcha.UnexpectedError) as err:
serv.verify_response("meaningless")
expected = "Unexpected data in response body: something awful"
assert str(err.value) == expected
@responses.activate
def test_missing_success_key_error(self, session_resetting_request):
responses.add(
responses.POST,
recaptcha.VERIFY_URL,
json={"foo": "bar"},
)
serv = recaptcha.Service.create_service(
context=None, request=session_resetting_request
)
with pytest.raises(recaptcha.UnexpectedError) as err:
serv.verify_response("meaningless")
expected = "Missing 'success' key in response: {'foo': 'bar'}"
assert str(err.value) == expected
@responses.activate
def test_missing_error_codes_key_error(self, session_resetting_request):
responses.add(
responses.POST,
recaptcha.VERIFY_URL,
json={"success": False},
)
serv = recaptcha.Service.create_service(
context=None, request=session_resetting_request
)
with pytest.raises(recaptcha.UnexpectedError) as err:
serv.verify_response("meaningless")
expected = "Response missing 'error-codes' key: {'success': False}"
assert str(err.value) == expected
@responses.activate
def test_error_map_error(self, session_resetting_request):
for key, exc_tp in recaptcha.ERROR_CODE_MAP.items():
responses.add(
responses.POST,
recaptcha.VERIFY_URL,
json={
"success": False,
"challenge_ts": 0,
"hostname": "hotname_value",
"error_codes": [key],
},
)
serv = recaptcha.Service.create_service(
context=None, request=session_resetting_request
)
with pytest.raises(exc_tp):
serv.verify_response("meaningless")
responses.reset()
@responses.activate
def test_error_map_unknown_error(self, session_resetting_request):
responses.add(
responses.POST,
recaptcha.VERIFY_URL,
json={
"success": False,
"challenge_ts": 0,
"hostname": "hostname_value",
"error_codes": ["slartibartfast"],
},
)
serv = recaptcha.Service.create_service(
context=None, request=session_resetting_request
)
with pytest.raises(recaptcha.UnexpectedError) as err:
serv.verify_response("meaningless")
assert str(err.value) == "Unexpected error code: slartibartfast"
@responses.activate
def test_challenge_response_missing_timestamp_success(
self, session_resetting_request
):
responses.add(
responses.POST,
recaptcha.VERIFY_URL,
json={
"success": True,
"hostname": "hostname_value",
},
)
serv = recaptcha.Service.create_service(
context=None, request=session_resetting_request
)
res = serv.verify_response("meaningless")
assert isinstance(res, recaptcha.ChallengeResponse)
assert res.challenge_ts is None
assert res.hostname == "hostname_value"
@responses.activate
def test_challenge_response_missing_hostname_success(
self, session_resetting_request
):
responses.add(
responses.POST,
recaptcha.VERIFY_URL,
json={
"success": True,
"challenge_ts": 0,
},
)
serv = recaptcha.Service.create_service(
context=None, request=session_resetting_request
)
res = serv.verify_response("meaningless")
assert isinstance(res, recaptcha.ChallengeResponse)
assert res.hostname is None
assert res.challenge_ts == 0
@responses.activate
def test_challenge_response_success(self, session_resetting_request):
responses.add(
responses.POST,
recaptcha.VERIFY_URL,
json={
"success": True,
"hostname": "hostname_value",
"challenge_ts": 0,
},
)
serv = recaptcha.Service.create_service(
context=None, request=session_resetting_request
)
res = serv.verify_response("meaningless")
assert isinstance(res, recaptcha.ChallengeResponse)
assert res.hostname == "hostname_value"
assert res.challenge_ts == 0
@responses.activate
def test_unexpected_error(self, session_resetting_request):
serv = recaptcha.Service.create_service(
context=None, request=session_resetting_request
)
serv.request.http.post = pretend.raiser(socket.error)
with pytest.raises(recaptcha.UnexpectedError):
serv.verify_response("meaningless")
|
TestVerifyResponse
|
python
|
kamyu104__LeetCode-Solutions
|
Python/valid-palindrome-iv.py
|
{
"start": 294,
"end": 683
}
|
class ____(object):
def makePalindrome(self, s):
"""
:type s: str
:rtype: bool
"""
cnt = 0
left, right = 0, len(s)-1
while left < right:
if s[left] != s[right]:
cnt += 1
if cnt > 2:
return False
left += 1
right -= 1
return True
|
Solution2
|
python
|
conda__conda
|
conda/exceptions.py
|
{
"start": 16605,
"end": 16692
}
|
class ____(ChannelNotAllowed):
warning = "Channel included in denylist"
|
ChannelDenied
|
python
|
python-markdown__markdown
|
markdown/extensions/codehilite.py
|
{
"start": 1685,
"end": 9778
}
|
class ____:
"""
Determine language of source code, and pass it on to the Pygments highlighter.
Usage:
```python
code = CodeHilite(src=some_code, lang='python')
html = code.hilite()
```
Arguments:
src: Source string or any object with a `.readline` attribute.
Keyword arguments:
lang (str): String name of Pygments lexer to use for highlighting. Default: `None`.
guess_lang (bool): Auto-detect which lexer to use.
Ignored if `lang` is set to a valid value. Default: `True`.
use_pygments (bool): Pass code to Pygments for code highlighting. If `False`, the code is
instead wrapped for highlighting by a JavaScript library. Default: `True`.
pygments_formatter (str): The name of a Pygments formatter or a formatter class used for
highlighting the code blocks. Default: `html`.
linenums (bool): An alias to Pygments `linenos` formatter option. Default: `None`.
css_class (str): An alias to Pygments `cssclass` formatter option. Default: 'codehilite'.
lang_prefix (str): Prefix prepended to the language. Default: "language-".
Other Options:
Any other options are accepted and passed on to the lexer and formatter. Therefore,
valid options include any options which are accepted by the `html` formatter or
whichever lexer the code's language uses. Note that most lexers do not have any
options. However, a few have very useful options, such as PHP's `startinline` option.
Any invalid options are ignored without error.
* **Formatter options**: <https://pygments.org/docs/formatters/#HtmlFormatter>
* **Lexer Options**: <https://pygments.org/docs/lexers/>
Additionally, when Pygments is enabled, the code's language is passed to the
formatter as an extra option `lang_str`, whose value being `{lang_prefix}{lang}`.
This option has no effect to the Pygments' builtin formatters.
Advanced Usage:
```python
code = CodeHilite(
src = some_code,
lang = 'php',
startinline = True, # Lexer option. Snippet does not start with `<?php`.
linenostart = 42, # Formatter option. Snippet starts on line 42.
hl_lines = [45, 49, 50], # Formatter option. Highlight lines 45, 49, and 50.
linenos = 'inline' # Formatter option. Avoid alignment problems.
)
html = code.hilite()
```
"""
def __init__(self, src: str, **options):
self.src = src
self.lang: str | None = options.pop('lang', None)
self.guess_lang: bool = options.pop('guess_lang', True)
self.use_pygments: bool = options.pop('use_pygments', True)
self.lang_prefix: str = options.pop('lang_prefix', 'language-')
self.pygments_formatter: str | Callable = options.pop('pygments_formatter', 'html')
if 'linenos' not in options:
options['linenos'] = options.pop('linenums', None)
if 'cssclass' not in options:
options['cssclass'] = options.pop('css_class', 'codehilite')
if 'wrapcode' not in options:
# Override Pygments default
options['wrapcode'] = True
# Disallow use of `full` option
options['full'] = False
self.options = options
def hilite(self, shebang: bool = True) -> str:
"""
Pass code to the [Pygments](https://pygments.org/) highlighter with
optional line numbers. The output should then be styled with CSS to
your liking. No styles are applied by default - only styling hooks
(i.e.: `<span class="k">`).
returns : A string of html.
"""
self.src = self.src.strip('\n')
if self.lang is None and shebang:
self._parseHeader()
if pygments and self.use_pygments:
try:
lexer = get_lexer_by_name(self.lang, **self.options)
except ValueError:
try:
if self.guess_lang:
lexer = guess_lexer(self.src, **self.options)
else:
lexer = get_lexer_by_name('text', **self.options)
except ValueError: # pragma: no cover
lexer = get_lexer_by_name('text', **self.options)
if not self.lang:
# Use the guessed lexer's language instead
self.lang = lexer.aliases[0]
lang_str = f'{self.lang_prefix}{self.lang}'
if isinstance(self.pygments_formatter, str):
try:
formatter = get_formatter_by_name(self.pygments_formatter, **self.options)
except ClassNotFound:
formatter = get_formatter_by_name('html', **self.options)
else:
formatter = self.pygments_formatter(lang_str=lang_str, **self.options)
return highlight(self.src, lexer, formatter)
else:
# just escape and build markup usable by JavaScript highlighting libraries
txt = self.src.replace('&', '&')
txt = txt.replace('<', '<')
txt = txt.replace('>', '>')
txt = txt.replace('"', '"')
classes = []
if self.lang:
classes.append('{}{}'.format(self.lang_prefix, self.lang))
if self.options['linenos']:
classes.append('linenums')
class_str = ''
if classes:
class_str = ' class="{}"'.format(' '.join(classes))
return '<pre class="{}"><code{}>{}\n</code></pre>\n'.format(
self.options['cssclass'],
class_str,
txt
)
def _parseHeader(self) -> None:
"""
Determines language of a code block from shebang line and whether the
said line should be removed or left in place. If the shebang line
contains a path (even a single /) then it is assumed to be a real
shebang line and left alone. However, if no path is given
(e.i.: `#!python` or `:::python`) then it is assumed to be a mock shebang
for language identification of a code fragment and removed from the
code block prior to processing for code highlighting. When a mock
shebang (e.i: `#!python`) is found, line numbering is turned on. When
colons are found in place of a shebang (e.i.: `:::python`), line
numbering is left in the current state - off by default.
Also parses optional list of highlight lines, like:
:::python hl_lines="1 3"
"""
import re
# split text into lines
lines = self.src.split("\n")
# pull first line to examine
fl = lines.pop(0)
c = re.compile(r'''
(?:(?:^::+)|(?P<shebang>^[#]!)) # Shebang or 2 or more colons
(?P<path>(?:/\w+)*[/ ])? # Zero or 1 path
(?P<lang>[\w#.+-]*) # The language
\s* # Arbitrary whitespace
# Optional highlight lines, single- or double-quote-delimited
(hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot))?
''', re.VERBOSE)
# search first line for shebang
m = c.search(fl)
if m:
# we have a match
try:
self.lang = m.group('lang').lower()
except IndexError: # pragma: no cover
self.lang = None
if m.group('path'):
# path exists - restore first line
lines.insert(0, fl)
if self.options['linenos'] is None and m.group('shebang'):
# Overridable and Shebang exists - use line numbers
self.options['linenos'] = True
self.options['hl_lines'] = parse_hl_lines(m.group('hl_lines'))
else:
# No match
lines.insert(0, fl)
self.src = "\n".join(lines).strip("\n")
# ------------------ The Markdown Extension -------------------------------
|
CodeHilite
|
python
|
kamyu104__LeetCode-Solutions
|
Python/similar-rgb-color.py
|
{
"start": 29,
"end": 444
}
|
class ____(object):
def similarRGB(self, color):
"""
:type color: str
:rtype: str
"""
def rounding(color):
q, r = divmod(int(color, 16), 17)
if r > 8: q += 1
return '{:02x}'.format(17*q)
return '#' + \
rounding(color[1:3]) + \
rounding(color[3:5]) + \
rounding(color[5:7])
|
Solution
|
python
|
pytorch__pytorch
|
test/distributed/elastic/multiprocessing/api_test.py
|
{
"start": 1170,
"end": 2728
}
|
class ____(TestCase):
def setUp(self):
super().setUp()
self.test_dir = tempfile.mkdtemp(prefix=f"{self.__class__.__name__}_")
def tearDown(self):
super().tearDown()
shutil.rmtree(self.test_dir)
def test_is_failed(self):
pr_success = RunProcsResult(return_values={0: "a", 1: "b"})
self.assertFalse(pr_success.is_failed())
fail0 = ProcessFailure(
local_rank=0, pid=998, exitcode=1, error_file="ignored.json"
)
pr_fail = RunProcsResult(failures={0: fail0})
self.assertTrue(pr_fail.is_failed())
def test_get_failures(self):
error_file0 = os.path.join(self.test_dir, "error0.json")
error_file1 = os.path.join(self.test_dir, "error1.json")
eh = ErrorHandler()
with mock.patch.dict(os.environ, {"TORCHELASTIC_ERROR_FILE": error_file0}):
eh.record_exception(RuntimeError("error 0"))
with mock.patch.dict(os.environ, {"TORCHELASTIC_ERROR_FILE": error_file0}):
eh.record_exception(RuntimeError("error 1"))
fail0 = ProcessFailure(
local_rank=0, pid=997, exitcode=1, error_file=error_file0
)
fail1 = ProcessFailure(
local_rank=1, pid=998, exitcode=3, error_file=error_file1
)
fail2 = ProcessFailure(
local_rank=2, pid=999, exitcode=15, error_file="no_exist.json"
)
self.assertLessEqual(fail0.timestamp, fail1.timestamp)
self.assertLessEqual(fail1.timestamp, fail2.timestamp)
|
RunProcResultsTest
|
python
|
Textualize__textual
|
tests/test_animation.py
|
{
"start": 5317,
"end": 7298
}
|
class ____(App[None]):
counter: var[float] = var(23)
def compose(self) -> ComposeResult:
yield CancelAnimWidget()
async def test_cancel_app_animation() -> None:
"""It should be possible to cancel a running app animation."""
async with CancelAnimApp().run_test() as pilot:
pilot.app.animate("counter", value=0, final_value=1000, duration=60)
await pilot.pause()
assert pilot.app.animator.is_being_animated(pilot.app, "counter")
await pilot.app.stop_animation("counter")
assert not pilot.app.animator.is_being_animated(pilot.app, "counter")
async def test_cancel_app_non_animation() -> None:
"""It should be possible to attempt to cancel a non-running app animation."""
async with CancelAnimApp().run_test() as pilot:
assert not pilot.app.animator.is_being_animated(pilot.app, "counter")
await pilot.app.stop_animation("counter")
assert not pilot.app.animator.is_being_animated(pilot.app, "counter")
async def test_cancel_widget_animation() -> None:
"""It should be possible to cancel a running widget animation."""
async with CancelAnimApp().run_test() as pilot:
widget = pilot.app.query_one(CancelAnimWidget)
widget.animate("counter", value=0, final_value=1000, duration=60)
await pilot.pause()
assert pilot.app.animator.is_being_animated(widget, "counter")
await widget.stop_animation("counter")
assert not pilot.app.animator.is_being_animated(widget, "counter")
async def test_cancel_widget_non_animation() -> None:
"""It should be possible to attempt to cancel a non-running widget animation."""
async with CancelAnimApp().run_test() as pilot:
widget = pilot.app.query_one(CancelAnimWidget)
assert not pilot.app.animator.is_being_animated(widget, "counter")
await widget.stop_animation("counter")
assert not pilot.app.animator.is_being_animated(widget, "counter")
|
CancelAnimApp
|
python
|
kamyu104__LeetCode-Solutions
|
Python/longest-palindrome-after-substring-concatenation-i.py
|
{
"start": 64,
"end": 1290
}
|
class ____(object):
def longestPalindrome(self, s, t):
"""
:type s: str
:type t: str
:rtype: int
"""
def manacher(s):
s = '^#' + '#'.join(s) + '#$'
P = [0]*len(s)
C, R = 0, 0
for i in xrange(1, len(s)-1):
i_mirror = 2*C-i
if R > i:
P[i] = min(R-i, P[i_mirror])
while s[i+1+P[i]] == s[i-1-P[i]]:
P[i] += 1
if i+P[i] > R:
C, R = i, i+P[i]
return P
def longest_palindrome(s):
result = [0]*(len(s)+1)
P = manacher(s)
for i in xrange(1, len(P)-1):
result[(i-P[i])//2] = P[i]
return result
t = t[::-1]
p1 = longest_palindrome(s)
p2 = longest_palindrome(t)
result = 0
dp = [[0]*(len(t)+1) for _ in xrange(len(s)+1)]
for i in xrange(len(s)):
for j in xrange(len(t)):
dp[i+1][j+1] = dp[i][j]+2 if s[i] == t[j] else 0
result = max(result, dp[i+1][j+1]+max(p1[i+int(s[i] == t[j])] , p2[j+int(s[i] == t[j])]))
return result
|
Solution
|
python
|
huggingface__transformers
|
src/transformers/models/llama/modeling_llama.py
|
{
"start": 14701,
"end": 15242
}
|
class ____(PreTrainedModel):
config: LlamaConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["LlamaDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": LlamaDecoderLayer,
"attentions": LlamaAttention,
}
@auto_docstring
|
LlamaPreTrainedModel
|
python
|
pytorch__pytorch
|
torch/_library/fake_class_registry.py
|
{
"start": 2994,
"end": 16208
}
|
class ____:
def __init__(self) -> None:
self._registered_class: dict[str, Any] = {}
def has_impl(self, full_qualname: str) -> bool:
return full_qualname in self._registered_class
def get_impl(self, full_qualname: str) -> Any:
self._check_registered(full_qualname)
return self._registered_class[full_qualname]
def register(self, full_qualname: str, fake_class=None) -> None:
if self.has_impl(full_qualname):
log.warning(
"%s is already registered. Previous fake class is overridden with %s.",
full_qualname,
fake_class,
)
self._registered_class[full_qualname] = fake_class
def deregister(self, full_qualname: str) -> Any:
if not self.has_impl(full_qualname):
log.warning(
"Cannot deregister %s. Please use register_fake_class to register it first."
" Or do you dereigster it twice?",
full_qualname,
)
else:
return self._registered_class.pop(full_qualname)
def clear(self) -> None:
self._registered_class.clear()
def _check_registered(self, full_qualname: str) -> None:
if full_qualname not in self._registered_class:
raise RuntimeError(
f"{full_qualname} is not registered. Please use register_fake_class to register it first."
)
global_fake_class_registry = FakeClassRegistry()
# TODO: add this check at compile time for __obj_flatten__.
def _check_valid_flat_script_obj(flat_x):
if not isinstance(flat_x, tuple):
raise RuntimeError("Expect flat x to be a tuple.")
for tp in flat_x:
if not isinstance(tp, tuple):
raise RuntimeError("Expect flat x to be a tuple of tuples.")
if not len(tp) == 2 or not isinstance(tp[0], str):
raise RuntimeError(
"Expect element of flat x to be a tuple of two elements with first element being a string"
)
def tracing_with_real(x: torch.ScriptObject) -> bool:
if not hasattr(x, "tracing_mode"):
return False
assert x.tracing_mode() in [
"real",
"fake",
], f"tracing_mode can be either real or fake but got {x.tracing_mode()}"
return x.tracing_mode() == "real"
def maybe_to_fake_obj(
fake_mode,
x: Any,
) -> Union[FakeScriptObject, torch.ScriptObject]:
import torch.utils._pytree as pytree
from torch.utils._python_dispatch import _disable_current_modes
# When tracing with real mode, people should implement meta kernels that can
# handle the case of real script object + fake tensor inputs.
if tracing_with_real(x):
return x
from torch._library.opaque_object import (
FakeOpaqueObject,
is_opaque_type,
OpaqueTypeStr,
)
if x is None or is_opaque_type(type(x)) or str(x._type()) == OpaqueTypeStr:
# In order to make OpaqueObjects truly opaque, the fake kernel should
# not depend on the contents of the OpaqueObject at all.
fake_x_wrapped = FakeScriptObject(FakeOpaqueObject(), OpaqueTypeStr, None)
return fake_x_wrapped
else:
# x.__obj_flatten__() could be calling some tensor operations inside but we don't
# want to call these ops in surrounding dispatch modes when executing it.
# Otherwise, for example, the fake tensor modes will error out when the tensors inside
# script object execute some operations like clone if allow_non_fake_input flag is set.
with _disable_current_modes():
flat_x = x.__obj_flatten__() # type: ignore[attr-defined]
_check_valid_flat_script_obj(flat_x)
with fake_mode:
from torch._higher_order_ops.utils import _tensor_storage
storage_map = {
_tensor_storage(inp): i
for i, inp in enumerate(flat_x)
if isinstance(inp, torch.Tensor)
}
alias_map = {
i: storage_map[_tensor_storage(inp)]
for i, inp in enumerate(flat_x)
if isinstance(inp, torch.Tensor)
and storage_map[_tensor_storage(inp)] != i
}
if len(alias_map) > 0:
log.warning(
"Detected script object %s has aliasing relationship among its tensors. "
"Flattened obj: %s. Aliasing tensor indices: %s. "
"This is not supported and may cause unexpected behavior.",
x,
flat_x,
alias_map,
)
# This breaks the aliasing relationship among the tensors inside the torchbind object
# This is bad but since we don't need to preserve the aliasing relationship anyway and
# we state clearly that aliasing relationship is not preserved in the doc so this might be OK.
fake_flattened = pytree.tree_map_only(
torch.Tensor,
lambda t: torch.empty_strided(
t.size(),
t.stride(),
device=t.device,
dtype=t.dtype,
requires_grad=t.requires_grad,
layout=t.layout,
),
flat_x,
)
fake_x = _find_fake_class_for_script_object(x).__obj_unflatten__(fake_flattened)
fake_x_wrapped = FakeScriptObject(fake_x, x._type().qualified_name(), x) # type: ignore[attr-defined]
for name in x._method_names(): # type: ignore[attr-defined]
attr = getattr(fake_x, name, None)
if attr is not None:
if not callable(attr):
raise RuntimeError(f"Expect {name} to be a callable but got {attr}.")
real_attr = getattr(x, name) # type: ignore[attr-defined]
# real attr sometimes is not torch.ScriptMethod thus doesn't have schema e.g. __init___ or __eq__
method_schema: Optional[torch.FunctionSchema] = None
if isinstance(real_attr, torch.ScriptMethod):
method_schema = real_attr.schema # type: ignore[attr-defined]
# Bypasses our custom setattr function
object.__setattr__(
fake_x_wrapped,
name,
FakeScriptMethod(fake_x_wrapped, name, method_schema),
)
else:
override_skip_list = {"__obj_flatten__", "__getstate__", "__setstate__"}
if name not in override_skip_list:
log.warning("fake object of %s doesn't implement method %s.", x, name)
return fake_x_wrapped
def register_fake_class(qualname, fake_class: Optional[HasStaticMethodFromReal] = None):
r"""Register a fake implementation for this class.
It's in the same spirit of registering a fake implementation for
an operator but with the difference that it
associates a fake class with the original torch bind class (registered
with torch::class_). In this way, torch.compile can handle them properly
in components such as Dynamo and AOTAutograd.
This API may be used as a decorator (see example). For the fake class, users
are required to provide a from_real classmethod that takes a real object and
returns an instance of the fake class. All tensors in the fake object should also
be properly fakified with to_fake_tensor() in from_real.
Examples:
# For a custom class Foo defined in test_custom_class_registration.cpp:
TORCH_LIBRARY(_TorchScriptTesting, m) {
m.class_<TensorQueue>("_TensorQueue")
.def(torch::init<at::Tensor>())
.def("push", &TensorQueue::push)
.def("pop", &TensorQueue::pop)
.def("top", &TensorQueue::top)
.def("size", &TensorQueue::size)
.def("clone_queue", &TensorQueue::clone_queue)
.def("__obj_flatten__", &TensorQueue::__obj_flatten__)
.def_pickle(
// __getstate__
[](const c10::intrusive_ptr<TensorQueue>& self)
-> c10::Dict<std::string, at::Tensor> {
return self->serialize();
},
// __setstate__
[](c10::Dict<std::string, at::Tensor> data)
-> c10::intrusive_ptr<TensorQueue> {
return c10::make_intrusive<TensorQueue>(std::move(data));
});
};
# We could register a fake class FakeTensorQueue in Python as follows:
import torch
@torch._library.register_fake_class("_TorchScriptTesting::_TensorQueue")
class FakeTensorQueue:
def __init__(self, queue):
self.queue = queue
@classmethod
def __obj_unflatten__(cls, flattened_ctx):
return cls(**dict(ctx))
def push(self, x):
self.queue.append(x)
def pop(self):
return self.queue.pop(0)
def size(self):
return len(self.queue)
In this example, the original TensorQeue need to add a __obj_flatten__ method
to the class TensorQueue and the flattened result is passed into FakeTensorQueue's
__obj_unflatten__ as inputs to create a fake class. This protocol allows pytorch to look
at the contents of the script object and properly handle them in the subsystems
like dynamo, aot_aotugrad or more.
"""
def inner(fake_class: HasStaticMethodFromReal):
ns, name = parse_namespace(qualname)
# This also checks whether the referred torch::class_ exists.
torch._C._get_custom_class_python_wrapper(ns, name)
from_method = getattr(fake_class, _CONVERT_FROM_REAL_NAME, None)
if not from_method:
raise RuntimeError(
f"{fake_class} doesn't define a classmethod {_CONVERT_FROM_REAL_NAME}."
)
if not isinstance(fake_class.__dict__[_CONVERT_FROM_REAL_NAME], classmethod):
raise RuntimeError(
f"{_CONVERT_FROM_REAL_NAME} method is not a classmethod."
)
global_fake_class_registry.register(_full_qual_class_name(qualname), fake_class)
return fake_class
if fake_class is None:
return inner
return inner(fake_class)
def deregister_fake_class(qualname):
return global_fake_class_registry.deregister(_full_qual_class_name(qualname))
def has_fake_class(full_qualname) -> bool:
return global_fake_class_registry.has_impl(full_qualname)
def find_fake_class(full_qualname) -> Optional[Any]:
if not has_fake_class(full_qualname):
return None
return global_fake_class_registry.get_impl(full_qualname)
def _full_qual_class_name(qualname: str) -> str:
ns, name = parse_namespace(qualname)
return "__torch__.torch.classes." + ns + "." + name
def _is_script_object(obj: Any) -> bool:
return isinstance(
obj, torch.ScriptObject
) and obj._type().qualified_name().startswith( # type: ignore[attr-defined]
"__torch__.torch.classes"
)
# Return the namespace and class name from fully qualified name.
def _ns_and_class_name(full_qualname: str) -> tuple[str, str]:
splits = full_qualname.split(".")
assert len(splits) == 5, f"Could not split {full_qualname=}"
_torch, _torch_ns, _classes, ns, class_name = splits
return ns, class_name
def _find_fake_class_for_script_object(x: torch.ScriptObject) -> Any:
full_qualname = x._type().qualified_name() # type: ignore[attr-defined]
ns, class_name = _ns_and_class_name(full_qualname)
fake_class = find_fake_class(full_qualname)
if fake_class is None:
raise RuntimeError(
f" ScriptObject's {full_qualname} haven't registered a fake class."
f" Please use register_fake_class({ns}::{class_name}) to annotate a fake class for the script obj."
f" Specifically, create a python class that implements a fake version for all the methods"
f" that're used in the program and put annotated class in the program e.g. after loading the library."
f" The fake methods can be written in the same way as a meta kernel for an operator but need to additionally"
f" simulate the object's states. Be sure to add a {_CONVERT_FROM_REAL_NAME} classmethod"
f" to enable creating a fake obj from a real one."
)
return fake_class
_CONVERT_FROM_REAL_NAME = "__obj_unflatten__"
def _fake_obj_from_real(fake_mode, x) -> Any:
fake_class = _find_fake_class_for_script_object(x)
from_real_method = getattr(fake_class, _CONVERT_FROM_REAL_NAME, None)
if not from_real_method:
raise RuntimeError(
f"{fake_class} must define a classmethod {_CONVERT_FROM_REAL_NAME}"
f" that converts the real object to the fake object."
)
# from_real defined by user need the ctx to fakify the tensor states.
ctx = torch._library.fake_impl.FakeImplCtx(fake_mode, None)
with torch._library.fake_impl.set_ctx_getter(lambda: ctx):
return fake_class.from_real(x)
|
FakeClassRegistry
|
python
|
sqlalchemy__sqlalchemy
|
test/ext/test_horizontal_shard.py
|
{
"start": 24952,
"end": 25960
}
|
class ____(ShardTest, fixtures.MappedTest):
"""Use modern schema conventions along with SQLite ATTACH."""
schema = "changeme"
def _init_dbs(self):
e = testing_engine("sqlite://")
with e.connect() as conn:
for i in range(1, 5):
conn.exec_driver_sql(
'ATTACH DATABASE "shard%s_%s.db" AS shard%s'
% (i, provision.FOLLOWER_IDENT, i)
)
db1 = e.execution_options(schema_translate_map={"changeme": "shard1"})
db2 = e.execution_options(schema_translate_map={"changeme": "shard2"})
db3 = e.execution_options(schema_translate_map={"changeme": "shard3"})
db4 = e.execution_options(schema_translate_map={"changeme": "shard4"})
self.engine = e
return db1, db2, db3, db4
def teardown_test(self):
testing_reaper.checkin_all()
for i in range(1, 5):
os.remove("shard%d_%s.db" % (i, provision.FOLLOWER_IDENT))
|
AttachedFileShardTest
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_service.py
|
{
"start": 383,
"end": 7106
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1ServiceSpec',
'status': 'V1ServiceStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""V1Service - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1Service. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1Service. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1Service.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1Service. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1Service. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1Service. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1Service.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1Service. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1Service. # noqa: E501
:return: The metadata of this V1Service. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1Service.
:param metadata: The metadata of this V1Service. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1Service. # noqa: E501
:return: The spec of this V1Service. # noqa: E501
:rtype: V1ServiceSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1Service.
:param spec: The spec of this V1Service. # noqa: E501
:type: V1ServiceSpec
"""
self._spec = spec
@property
def status(self):
"""Gets the status of this V1Service. # noqa: E501
:return: The status of this V1Service. # noqa: E501
:rtype: V1ServiceStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1Service.
:param status: The status of this V1Service. # noqa: E501
:type: V1ServiceStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Service):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Service):
return True
return self.to_dict() != other.to_dict()
|
V1Service
|
python
|
scipy__scipy
|
scipy/stats/tests/test_morestats.py
|
{
"start": 113329,
"end": 114220
}
|
class ____:
def test_array_like(self):
# array_like not applicable with SCIPY_ARRAY_API=1
x = stats.norm.rvs(size=100, loc=0, random_state=54321)
lmbda = 1
llf = stats.yeojohnson_llf(lmbda, x)
llf2 = stats.yeojohnson_llf(lmbda, list(x))
assert_allclose(llf, llf2, rtol=1e-12)
def test_2d_input(self, xp):
x = stats.norm.rvs(size=100, loc=10, random_state=54321)
x = xp.asarray(x)
lmbda = 1
ref = stats.yeojohnson_llf(lmbda, x)
res = stats.yeojohnson_llf(lmbda, xp.stack([x, x]).T)
xp_assert_close(res, xp.stack((ref, ref)), rtol=1e-12)
def test_empty(self, xp):
message = "One or more sample arguments is too small..."
with eager_warns(SmallSampleWarning, match=message, xp=xp):
assert xp.isnan(stats.yeojohnson_llf(1, xp.asarray([])))
|
TestYeojohnson_llf
|
python
|
tox-dev__tox
|
src/tox/config/cli/parser.py
|
{
"start": 4919,
"end": 5424
}
|
class ____(Namespace):
"""CLI options."""
@property
def verbosity(self) -> int:
""":return: reporting verbosity"""
result: int = max(self.verbose - self.quiet, 0)
return result
@property
def is_colored(self) -> bool:
""":return: flag indicating if the output is colored or not"""
return cast("bool", self.colored == "yes")
exit_and_dump_after: int
ArgumentArgs = tuple[tuple[str, ...], type[Any] | UnionType | None, dict[str, Any]]
|
Parsed
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 582320,
"end": 582954
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of EnablePullRequestAutoMerge"""
__schema__ = github_schema
__field_names__ = ("actor", "client_mutation_id", "pull_request")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
"""Identifies the actor who performed the event."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
pull_request = sgqlc.types.Field("PullRequest", graphql_name="pullRequest")
"""The pull request auto-merge was enabled on."""
|
EnablePullRequestAutoMergePayload
|
python
|
dagster-io__dagster
|
examples/docs_projects/project_ml/src/project_ml/defs/assets/prediction_assets.py
|
{
"start": 573,
"end": 7839
}
|
class ____(dg.Config):
"""Configuration for real-time prediction processing."""
batch_size: int = 10 # Default number of images to process at once
device: str = "cuda" # Will fallback to CPU if CUDA not available
confidence_threshold: float = 0.9 # Higher threshold for real-time predictions
return_probabilities: bool = False # Whether to return full probability distribution
# end_realtime_prediction_config
@dg.asset(
description="Generate predictions on uploaded digit images",
group_name="inference",
required_resource_keys={"model_storage"},
deps=["production_digit_classifier"],
)
def batch_digit_predictions(
context,
config: BatchPredictionConfig,
) -> dict[str, list]:
"""Process overnight batch of user-uploaded digit images."""
# Get the model store resource
model_store = context.resources.model_storage
try:
# List saved models and get the latest one
saved_models = model_store.list_models()
if not saved_models:
context.log.error("No saved models found")
return {"predictions": [], "confidences": []}
# Get the latest model name (first one is newest due to sorting)
latest_model_name = saved_models[0] # Already just the model name
context.log.info(f"Loading production model: {latest_model_name}")
# Load the model using the resource
model_data = model_store.load_model(latest_model_name)
# Handle both formats: dict with 'model' key or direct model object
if isinstance(model_data, dict) and "model" in model_data:
production_model = model_data["model"]
else:
production_model = model_data # Direct model object
context.log.info("Model loaded successfully")
context.log.info(f"Model architecture:\n{production_model!s}")
except Exception as e:
context.log.error(f"Failed to load production model: {e!s}")
context.log.error(f"Exception details: {e.__class__.__name__!s}")
import traceback
context.log.error(f"Traceback: {traceback.format_exc()}")
return {"predictions": [], "confidences": []}
# For demo purposes, create some dummy test images
dummy_images = torch.randn(config.num_test_images, 1, 28, 28)
device = torch.device(
config.device if torch.cuda.is_available() and config.device == "cuda" else "cpu"
)
production_model.to(device)
production_model.eval()
# Preprocess images
processed_images = dummy_images.float() / 255.0
dataset = TensorDataset(processed_images)
dataloader = DataLoader(dataset, batch_size=config.batch_size, shuffle=False)
predictions = []
confidences = []
with torch.no_grad():
for (data,) in dataloader:
_data = data.to(device)
outputs = production_model(_data)
probabilities = torch.softmax(outputs, dim=1)
predicted_classes = torch.argmax(probabilities, dim=1)
max_confidences = torch.max(probabilities, dim=1)[0]
predictions.extend(predicted_classes.cpu().numpy().tolist())
confidences.extend(max_confidences.cpu().numpy().tolist())
context.add_output_metadata(
{
"total_predictions": len(predictions),
"avg_confidence": float(np.mean(confidences)),
"low_confidence_count": sum(1 for c in confidences if c < config.confidence_threshold),
"confidence_threshold": config.confidence_threshold,
"model_path": latest_model_name,
},
output_name="result",
)
context.log.info(f"Generated {len(predictions)} batch predictions")
return {"predictions": predictions, "confidences": confidences}
@dg.asset(
description="Real-time digit prediction endpoint",
group_name="inference",
required_resource_keys={"model_storage"},
deps=["production_digit_classifier"],
)
def digit_predictions(
context,
config: RealTimePredictionConfig,
) -> dict[str, Any]:
"""Classify new handwritten digits in real-time."""
# Get the model store resource
model_store = context.resources.model_storage
try:
# List saved models and get the latest one
saved_models = model_store.list_models()
if not saved_models:
context.log.error("No saved models found")
return {
"prediction": None,
"confidence": 0.0,
"error": "No models available",
}
# Get the latest model name (first one is newest due to sorting)
latest_model_name = saved_models[0] # Already just the model name
context.log.info(f"Loading production model: {latest_model_name}")
# Load the model using the resource
model_data = model_store.load_model(latest_model_name)
# Handle both formats: dict with 'model' key or direct model object
if isinstance(model_data, dict) and "model" in model_data:
production_model = model_data["model"]
else:
production_model = model_data # Direct model object
context.log.info("Model loaded successfully")
except Exception as e:
context.log.error(f"Failed to load production model: {e!s}")
return {"prediction": None, "confidence": 0.0, "error": str(e)}
# For demo purposes, create some test images
input_images = torch.randn(config.batch_size, 1, 28, 28)
device = torch.device(
config.device if torch.cuda.is_available() and config.device == "cuda" else "cpu"
)
production_model.to(device)
production_model.eval()
# Preprocess input images
processed_images = input_images.float() / 255.0
predictions = []
confidences = []
all_probabilities = []
with torch.no_grad():
processed_images = processed_images.to(device)
outputs = production_model(processed_images)
probabilities = torch.softmax(outputs, dim=1)
predicted_classes = torch.argmax(probabilities, dim=1)
max_confidences = torch.max(probabilities, dim=1)[0]
predictions = predicted_classes.cpu().numpy().tolist()
confidences = max_confidences.cpu().numpy().tolist()
if config.return_probabilities:
all_probabilities = probabilities.cpu().numpy().tolist()
avg_confidence = float(np.mean(confidences))
context.add_output_metadata(
{
"prediction_count": len(predictions),
"avg_confidence": avg_confidence,
"high_confidence_predictions": sum(
1 for c in confidences if c >= config.confidence_threshold
),
"confidence_threshold": config.confidence_threshold,
"model_path": latest_model_name,
},
output_name="result",
)
result = {
"predictions": predictions,
"confidences": confidences,
}
if config.return_probabilities:
result["probabilities"] = all_probabilities
if avg_confidence < config.confidence_threshold:
context.log.warning(
f"Average confidence {avg_confidence:.2f} below threshold {config.confidence_threshold}"
)
return result
|
RealTimePredictionConfig
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/selectable.py
|
{
"start": 174991,
"end": 175890
}
|
class ____:
__slots__ = ()
_raw_columns: List[_ColumnsClauseElement]
_where_criteria: Tuple[ColumnElement[Any], ...]
_from_obj: Tuple[FromClause, ...]
def _iterate_from_elements(self) -> Iterator[FromClause]:
# note this does not include elements
# in _setup_joins
seen = set()
for element in self._raw_columns:
for fr in element._from_objects:
if fr in seen:
continue
seen.add(fr)
yield fr
for element in self._where_criteria:
for fr in element._from_objects:
if fr in seen:
continue
seen.add(fr)
yield fr
for element in self._from_obj:
if element in seen:
continue
seen.add(element)
yield element
|
_SelectFromElements
|
python
|
huggingface__transformers
|
src/transformers/models/distilbert/modeling_distilbert.py
|
{
"start": 2894,
"end": 5817
}
|
class ____(nn.Module):
def __init__(self, config: PreTrainedConfig):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim)
self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12)
self.dropout = nn.Dropout(config.dropout)
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
def forward(
self,
input_ids: torch.Tensor,
input_embeds: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> torch.Tensor:
if input_ids is not None:
input_embeds = self.word_embeddings(input_ids) # (bs, max_seq_length, dim)
seq_length = input_embeds.size(1)
if position_ids is None:
# Setting the position-ids to the registered buffer in constructor, it helps
# when tracing the model without passing position-ids, solves
# issues similar to issue #5664
if hasattr(self, "position_ids"):
position_ids = self.position_ids[:, :seq_length]
else:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) # (max_seq_length)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # (bs, max_seq_length)
position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim)
embeddings = input_embeds + position_embeddings # (bs, max_seq_length, dim)
embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim)
embeddings = self.dropout(embeddings) # (bs, max_seq_length, dim)
return embeddings
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
|
Embeddings
|
python
|
facebook__pyre-check
|
client/json_rpc.py
|
{
"start": 9279,
"end": 11551
}
|
class ____(Response):
code: int
message: str = ""
data: Optional[object] = None
activity_key: Optional[JSON] = None
def json(self) -> JSON:
return {
"jsonrpc": JSONRPC_VERSION,
**({"id": self.id} if self.id is not None else {}),
**(
{"activityKey": self.activity_key}
if self.activity_key is not None
else {}
),
"error": {
"code": self.code,
"message": self.message,
**({"data": self.data} if self.data is not None else {}),
},
}
@staticmethod
def from_json(response_json: JSON) -> "ErrorResponse":
"""
Parse a given JSON into a JSON-RPC error response.
Raises `InvalidRequestError` if the JSON body is malformed.
"""
_verify_json_rpc_version(response_json)
error = response_json.get("error")
if error is None:
raise InvalidRequestError(
f"Required field `error` is missing: {response_json}"
)
if not isinstance(error, dict):
raise InvalidRequestError(f"`error` must be a dict but got {error}")
code = error.get("code")
if code is None:
raise InvalidRequestError(
f"Required field `error.code` is missing: {response_json}"
)
if not isinstance(code, int):
raise InvalidRequestError(
f"`error.code` is expected to be an int but got {code}"
)
message = error.get("message", "")
if not isinstance(message, str):
raise InvalidRequestError(
f"`error.message` is expected to be a string but got {message}"
)
data = error.get("data")
# FIXME: The `id` field is required for the respnose, but we can't
# enforce it right now since the Pyre server may emit id-less responses
# and that has to be fixed first.
id = _parse_json_rpc_id(response_json)
activity_key = _parse_json_rpc_activity_key(response_json)
return ErrorResponse(
id=id, activity_key=activity_key, code=code, message=message, data=data
)
|
ErrorResponse
|
python
|
getsentry__sentry
|
src/sentry/workflow_engine/endpoints/validators/detector_workflow_mutation.py
|
{
"start": 41,
"end": 160
}
|
class ____(serializers.Serializer):
enabled = serializers.BooleanField(required=True)
|
DetectorWorkflowMutationValidator
|
python
|
getsentry__sentry
|
tests/sentry/integrations/api/endpoints/test_organization_integration_request.py
|
{
"start": 49,
"end": 2166
}
|
class ____(APITestCase):
"""Unit tests for emailing organization owners asking them to install an integration."""
endpoint = "sentry-api-0-organization-integration-request"
method = "post"
def setUp(self) -> None:
self.owner = self.user
self.member = self.create_user(email="member@example.com")
self.create_member(user=self.member, organization=self.organization, role="member")
self.login_as(user=self.member)
def test_integration_request(self) -> None:
self.get_success_response(
self.organization.slug,
providerSlug="github",
providerType="first_party",
)
def test_integration_request_with_invalid_plugin(self) -> None:
self.get_error_response(
self.organization.slug,
providerSlug="ERROR",
providerType="plugin",
status_code=400,
)
def test_integration_request_with_invalid_sentryapp(self) -> None:
self.get_error_response(
self.organization.slug,
providerSlug="ERROR",
providerType="sentry_app",
status_code=400,
)
def test_integration_request_with_invalid_integration(self) -> None:
self.get_error_response(
self.organization.slug,
providerSlug="ERROR",
providerType="first_party",
status_code=400,
)
def test_integration_request_as_owner(self) -> None:
self.login_as(user=self.owner)
response = self.get_success_response(
self.organization.slug,
providerSlug="github",
providerType="first_party",
)
assert response.data["detail"] == "User can install integration"
def test_integration_request_without_permissions(self) -> None:
self.login_as(user=self.create_user(email="nonmember@example.com"))
self.get_error_response(
self.organization.slug,
providerSlug="github",
providerType="first_party",
status_code=403,
)
|
OrganizationIntegrationRequestTest
|
python
|
great-expectations__great_expectations
|
tests/integration/sql_session_manager.py
|
{
"start": 563,
"end": 1329
}
|
class ____:
# The sqlalchemy connection pool class to use. In general we want to use QueuePool
poolclass: Type[Pool]
# The number of connections to keep in the pool
pool_size: int
# If all pool connections are used, we can create an additional max_overflow connections
# When returning connections to the pool, if the pool is full, additional connections will
# be discarded. This is specific for a QueuePool.
max_overflow: int
# The number of seconds a connection can be open for before we recycle it and create a new one.
pool_recycle: int
# Number of seconds to wait before giving up on getting a connection from the pool
pool_timeout: int
# Test connection liveness on checkout
pool_pre_ping: bool
|
PoolConfig
|
python
|
apache__airflow
|
providers/docker/tests/unit/docker/operators/test_docker.py
|
{
"start": 5124,
"end": 34427
}
|
class ____:
@pytest.fixture(autouse=True)
def setup_patchers(self, docker_api_client_patcher):
self.tempdir_patcher = mock.patch("airflow.providers.docker.operators.docker.TemporaryDirectory")
self.tempdir_mock = self.tempdir_patcher.start()
self.tempdir_mock.return_value.__enter__.return_value = TEMPDIR_MOCK_RETURN_VALUE
self.client_mock = mock.Mock(spec=APIClient)
self.client_mock.create_container.return_value = {"Id": "some_id"}
self.client_mock.images.return_value = []
self.client_mock.pull.return_value = {"status": "pull log"}
self.client_mock.wait.return_value = {"StatusCode": 0}
self.client_mock.create_host_config.return_value = mock.Mock()
self.log_messages = ["container log 😁 ", b"byte string container log"]
self.client_mock.attach.return_value = self.log_messages
# If logs() is called with tail then only return the last value, otherwise return the whole log.
self.client_mock.logs.side_effect = (
lambda **kwargs: iter(self.log_messages[-kwargs["tail"] :])
if "tail" in kwargs
else iter(self.log_messages)
)
docker_api_client_patcher.return_value = self.client_mock
def dotenv_mock_return_value(**kwargs):
env_dict = {}
env_str = kwargs["stream"]
for env_var in env_str.splitlines():
key, _, val = env_var.partition("=")
env_dict[key] = val
return env_dict
self.dotenv_patcher = mock.patch("airflow.providers.docker.operators.docker.dotenv_values")
self.dotenv_mock = self.dotenv_patcher.start()
self.dotenv_mock.side_effect = dotenv_mock_return_value
yield
self.tempdir_patcher.stop()
self.dotenv_patcher.stop()
def test_execute(self):
stringio_patcher = mock.patch("airflow.providers.docker.operators.docker.StringIO")
stringio_mock = stringio_patcher.start()
stringio_mock.side_effect = lambda *args: args[0]
operator = DockerOperator(
api_version=TEST_API_VERSION,
command="env",
environment={"UNIT": "TEST"},
private_environment={"PRIVATE": "MESSAGE"},
env_file="ENV=FILE\nVAR=VALUE",
image=TEST_IMAGE,
network_mode="bridge",
owner="unittest",
task_id="unittest",
mounts=[Mount(source="/host/path", target="/container/path", type="bind")],
entrypoint=TEST_ENTRYPOINT,
working_dir="/container/path",
shm_size=1000,
tmp_dir=TEST_AIRFLOW_TEMP_DIRECTORY,
host_tmp_dir=TEST_HOST_TEMP_DIRECTORY,
container_name="test_container",
tty=True,
hostname=TEST_CONTAINER_HOSTNAME,
device_requests=[DeviceRequest(count=-1, capabilities=[["gpu"]])],
log_opts_max_file="5",
log_opts_max_size="10m",
)
operator.execute(None)
self.client_mock.create_container.assert_called_once_with(
command="env",
name="test_container",
environment={
"AIRFLOW_TMP_DIR": TEST_AIRFLOW_TEMP_DIRECTORY,
"UNIT": "TEST",
"PRIVATE": "MESSAGE",
"ENV": "FILE",
"VAR": "VALUE",
},
host_config=self.client_mock.create_host_config.return_value,
image=TEST_IMAGE,
user=None,
entrypoint=["sh", "-c"],
working_dir="/container/path",
tty=True,
hostname=TEST_CONTAINER_HOSTNAME,
ports=[],
labels=None,
)
self.client_mock.create_host_config.assert_called_once_with(
mounts=[
Mount(source="/host/path", target="/container/path", type="bind"),
Mount(source="/mkdtemp", target=TEST_AIRFLOW_TEMP_DIRECTORY, type="bind"),
],
network_mode="bridge",
shm_size=1000,
cpu_shares=1024,
mem_limit=None,
auto_remove=False,
dns=None,
dns_search=None,
cap_add=None,
extra_hosts=None,
privileged=False,
device_requests=[DeviceRequest(count=-1, capabilities=[["gpu"]])],
log_config=LogConfig(config={"max-size": "10m", "max-file": "5"}),
ipc_mode=None,
port_bindings={},
ulimits=[],
)
self.tempdir_mock.assert_called_once_with(dir=TEST_HOST_TEMP_DIRECTORY, prefix="airflowtmp")
self.client_mock.images.assert_called_once_with(name=TEST_IMAGE)
self.client_mock.attach.assert_called_once_with(
container="some_id", stdout=True, stderr=True, stream=True
)
self.client_mock.pull.assert_called_once_with(TEST_IMAGE, stream=True, decode=True)
self.client_mock.wait.assert_called_once_with("some_id")
assert operator.cli.pull(TEST_IMAGE, stream=True, decode=True) == self.client_mock.pull.return_value
stringio_mock.assert_called_once_with("ENV=FILE\nVAR=VALUE")
self.dotenv_mock.assert_called_once_with(stream="ENV=FILE\nVAR=VALUE")
stringio_patcher.stop()
def test_execute_no_temp_dir(self):
stringio_patcher = mock.patch("airflow.providers.docker.operators.docker.StringIO")
stringio_mock = stringio_patcher.start()
stringio_mock.side_effect = lambda *args: args[0]
operator = DockerOperator(
api_version="1.19",
command="env",
environment={"UNIT": "TEST"},
private_environment={"PRIVATE": "MESSAGE"},
env_file="ENV=FILE\nVAR=VALUE",
image=TEST_IMAGE,
network_mode="bridge",
owner="unittest",
task_id="unittest",
mounts=[Mount(source="/host/path", target="/container/path", type="bind")],
mount_tmp_dir=False,
entrypoint=TEST_ENTRYPOINT,
working_dir="/container/path",
shm_size=1000,
host_tmp_dir=TEST_HOST_TEMP_DIRECTORY,
container_name="test_container",
hostname=TEST_CONTAINER_HOSTNAME,
tty=True,
)
operator.execute(None)
self.client_mock.create_container.assert_called_once_with(
command="env",
name="test_container",
environment={"UNIT": "TEST", "PRIVATE": "MESSAGE", "ENV": "FILE", "VAR": "VALUE"},
host_config=self.client_mock.create_host_config.return_value,
image=TEST_IMAGE,
user=None,
entrypoint=["sh", "-c"],
working_dir="/container/path",
tty=True,
hostname=TEST_CONTAINER_HOSTNAME,
ports=[],
labels=None,
)
self.client_mock.create_host_config.assert_called_once_with(
mounts=[
Mount(source="/host/path", target="/container/path", type="bind"),
],
network_mode="bridge",
shm_size=1000,
cpu_shares=1024,
mem_limit=None,
auto_remove=False,
dns=None,
dns_search=None,
cap_add=None,
extra_hosts=None,
privileged=False,
device_requests=None,
log_config=LogConfig(config={}),
ipc_mode=None,
port_bindings={},
ulimits=[],
)
self.tempdir_mock.assert_not_called()
self.client_mock.images.assert_called_once_with(name=TEST_IMAGE)
self.client_mock.attach.assert_called_once_with(
container="some_id", stdout=True, stderr=True, stream=True
)
self.client_mock.pull.assert_called_once_with(TEST_IMAGE, stream=True, decode=True)
self.client_mock.wait.assert_called_once_with("some_id")
assert operator.cli.pull(TEST_IMAGE, stream=True, decode=True) == self.client_mock.pull.return_value
stringio_mock.assert_called_once_with("ENV=FILE\nVAR=VALUE")
self.dotenv_mock.assert_called_once_with(stream="ENV=FILE\nVAR=VALUE")
stringio_patcher.stop()
def test_execute_fallback_temp_dir(self, caplog):
self.client_mock.create_container.side_effect = [
APIError(message=f"wrong path: {TEMPDIR_MOCK_RETURN_VALUE}"),
{"Id": "some_id"},
]
stringio_patcher = mock.patch("airflow.providers.docker.operators.docker.StringIO")
stringio_mock = stringio_patcher.start()
stringio_mock.side_effect = lambda *args: args[0]
operator = DockerOperator(
api_version="1.19",
command="env",
environment={"UNIT": "TEST"},
private_environment={"PRIVATE": "MESSAGE"},
env_file="ENV=FILE\nVAR=VALUE",
image=TEST_IMAGE,
network_mode="bridge",
owner="unittest",
task_id="unittest",
mounts=[Mount(source="/host/path", target="/container/path", type="bind")],
mount_tmp_dir=True,
entrypoint=TEST_ENTRYPOINT,
working_dir="/container/path",
shm_size=1000,
host_tmp_dir=TEST_HOST_TEMP_DIRECTORY,
tmp_dir=TEST_AIRFLOW_TEMP_DIRECTORY,
container_name="test_container",
tty=True,
)
caplog.clear()
with caplog.at_level(logging.WARNING, logger=operator.log.name):
operator.execute(None)
warning_message = (
"Using remote engine or docker-in-docker and mounting temporary volume from host "
"is not supported. Falling back to `mount_tmp_dir=False` mode. "
"You can set `mount_tmp_dir` parameter to False to disable mounting and remove the warning"
)
assert warning_message in caplog.messages
self.client_mock.create_container.assert_has_calls(
[
call(
command="env",
name="test_container",
environment={
"AIRFLOW_TMP_DIR": TEST_AIRFLOW_TEMP_DIRECTORY,
"UNIT": "TEST",
"PRIVATE": "MESSAGE",
"ENV": "FILE",
"VAR": "VALUE",
},
host_config=self.client_mock.create_host_config.return_value,
image=TEST_IMAGE,
user=None,
entrypoint=["sh", "-c"],
working_dir="/container/path",
tty=True,
hostname=None,
ports=[],
labels=None,
),
call(
command="env",
name="test_container",
environment={"UNIT": "TEST", "PRIVATE": "MESSAGE", "ENV": "FILE", "VAR": "VALUE"},
host_config=self.client_mock.create_host_config.return_value,
image=TEST_IMAGE,
user=None,
entrypoint=["sh", "-c"],
working_dir="/container/path",
tty=True,
hostname=None,
ports=[],
labels=None,
),
]
)
self.client_mock.create_host_config.assert_has_calls(
[
call(
mounts=[
Mount(source="/host/path", target="/container/path", type="bind"),
Mount(source="/mkdtemp", target=TEST_AIRFLOW_TEMP_DIRECTORY, type="bind"),
],
network_mode="bridge",
shm_size=1000,
cpu_shares=1024,
mem_limit=None,
auto_remove=False,
dns=None,
dns_search=None,
cap_add=None,
extra_hosts=None,
privileged=False,
device_requests=None,
log_config=LogConfig(config={}),
ipc_mode=None,
port_bindings={},
ulimits=[],
),
call(
mounts=[
Mount(source="/host/path", target="/container/path", type="bind"),
],
network_mode="bridge",
shm_size=1000,
cpu_shares=1024,
mem_limit=None,
auto_remove=False,
dns=None,
dns_search=None,
cap_add=None,
extra_hosts=None,
privileged=False,
device_requests=None,
log_config=LogConfig(config={}),
ipc_mode=None,
port_bindings={},
ulimits=[],
),
]
)
self.tempdir_mock.assert_called_once_with(dir=TEST_HOST_TEMP_DIRECTORY, prefix="airflowtmp")
self.client_mock.images.assert_called_once_with(name=TEST_IMAGE)
self.client_mock.attach.assert_called_once_with(
container="some_id", stdout=True, stderr=True, stream=True
)
self.client_mock.pull.assert_called_once_with(TEST_IMAGE, stream=True, decode=True)
self.client_mock.wait.assert_called_once_with("some_id")
assert operator.cli.pull(TEST_IMAGE, stream=True, decode=True) == self.client_mock.pull.return_value
stringio_mock.assert_called_with("ENV=FILE\nVAR=VALUE")
self.dotenv_mock.assert_called_with(stream="ENV=FILE\nVAR=VALUE")
stringio_patcher.stop()
def test_private_environment_is_private(self):
operator = DockerOperator(
private_environment={"PRIVATE": "MESSAGE"}, image=TEST_IMAGE, task_id="unittest"
)
assert operator._private_environment == {"PRIVATE": "MESSAGE"}, (
"To keep this private, it must be an underscored attribute."
)
@mock.patch("airflow.providers.docker.operators.docker.StringIO")
def test_environment_overrides_env_file(self, stringio_mock):
stringio_mock.side_effect = lambda *args: args[0]
operator = DockerOperator(
command="env",
environment={"UNIT": "TEST"},
private_environment={"PRIVATE": "MESSAGE"},
env_file="UNIT=FILE\nPRIVATE=FILE\nVAR=VALUE",
image=TEST_IMAGE,
task_id="unittest",
entrypoint=TEST_ENTRYPOINT,
working_dir="/container/path",
host_tmp_dir=TEST_HOST_TEMP_DIRECTORY,
tmp_dir=TEST_AIRFLOW_TEMP_DIRECTORY,
container_name="test_container",
tty=True,
)
operator.execute(None)
self.client_mock.create_container.assert_called_once_with(
command="env",
name="test_container",
environment={
"AIRFLOW_TMP_DIR": TEST_AIRFLOW_TEMP_DIRECTORY,
"UNIT": "TEST",
"PRIVATE": "MESSAGE",
"VAR": "VALUE",
},
host_config=self.client_mock.create_host_config.return_value,
image=TEST_IMAGE,
user=None,
entrypoint=["sh", "-c"],
working_dir="/container/path",
tty=True,
hostname=None,
ports=[],
labels=None,
)
stringio_mock.assert_called_once_with("UNIT=FILE\nPRIVATE=FILE\nVAR=VALUE")
self.dotenv_mock.assert_called_once_with(stream="UNIT=FILE\nPRIVATE=FILE\nVAR=VALUE")
def test_execute_unicode_logs(self):
self.client_mock.attach.return_value = ["unicode container log 😁"]
original_raise_exceptions = logging.raiseExceptions
logging.raiseExceptions = True
operator = DockerOperator(image=TEST_IMAGE, owner="unittest", task_id="unittest")
with mock.patch("traceback.print_exception") as print_exception_mock:
operator.execute(None)
logging.raiseExceptions = original_raise_exceptions
print_exception_mock.assert_not_called()
@pytest.mark.parametrize(
("kwargs", "actual_exit_code", "expected_exc"),
[
({}, 0, None),
({}, 100, AirflowException),
({}, 101, AirflowException),
({"skip_on_exit_code": None}, 0, None),
({"skip_on_exit_code": None}, 100, AirflowException),
({"skip_on_exit_code": None}, 101, AirflowException),
({"skip_on_exit_code": 100}, 0, None),
({"skip_on_exit_code": 100}, 100, AirflowSkipException),
({"skip_on_exit_code": 100}, 101, AirflowException),
({"skip_on_exit_code": 0}, 0, AirflowSkipException),
({"skip_on_exit_code": [100]}, 0, None),
({"skip_on_exit_code": [100]}, 100, AirflowSkipException),
({"skip_on_exit_code": [100]}, 101, AirflowException),
({"skip_on_exit_code": [100, 102]}, 101, AirflowException),
({"skip_on_exit_code": (100,)}, 0, None),
({"skip_on_exit_code": (100,)}, 100, AirflowSkipException),
({"skip_on_exit_code": (100,)}, 101, AirflowException),
],
)
def test_skip(self, kwargs, actual_exit_code, expected_exc):
msg = {"StatusCode": actual_exit_code}
self.client_mock.wait.return_value = msg
operator = DockerOperator(image="ubuntu", owner="unittest", task_id="unittest", **kwargs)
if expected_exc is None:
operator.execute({})
else:
with pytest.raises(expected_exc):
operator.execute({})
def test_execute_container_fails(self):
failed_msg = {"StatusCode": 1}
log_line = ["unicode container log 😁 ", b"byte string container log"]
expected_message = "Docker container failed: {failed_msg}"
self.client_mock.attach.return_value = log_line
self.client_mock.wait.return_value = failed_msg
operator = DockerOperator(image="ubuntu", owner="unittest", task_id="unittest")
with pytest.raises(DockerContainerFailedException) as raised_exception:
operator.execute(None)
assert str(raised_exception.value) == expected_message.format(
failed_msg=failed_msg,
)
assert raised_exception.value.logs == [log_line[0].strip(), log_line[1].decode("utf-8")]
def test_auto_remove_container_fails(self):
self.client_mock.wait.return_value = {"StatusCode": 1}
operator = DockerOperator(image="ubuntu", owner="unittest", task_id="unittest", auto_remove="success")
operator.container = {"Id": "some_id"}
with pytest.raises(AirflowException):
operator.execute(None)
self.client_mock.remove_container.assert_called_once_with("some_id")
def test_execute_xcom_behavior(self):
self.client_mock.pull.return_value = [b'{"status":"pull log"}']
kwargs = {
"api_version": "1.19",
"command": "env",
"environment": {"UNIT": "TEST"},
"private_environment": {"PRIVATE": "MESSAGE"},
"image": "ubuntu:latest",
"network_mode": "bridge",
"owner": "unittest",
"task_id": "unittest",
"mounts": [Mount(source="/host/path", target="/container/path", type="bind")],
"working_dir": "/container/path",
"shm_size": 1000,
"host_tmp_dir": "/host/airflow",
"container_name": "test_container",
"tty": True,
}
xcom_push_operator = DockerOperator(**kwargs, do_xcom_push=True, xcom_all=False)
xcom_all_operator = DockerOperator(**kwargs, do_xcom_push=True, xcom_all=True)
no_xcom_push_operator = DockerOperator(**kwargs, do_xcom_push=False)
xcom_push_result = xcom_push_operator.execute(None)
xcom_all_result = xcom_all_operator.execute(None)
no_xcom_push_result = no_xcom_push_operator.execute(None)
assert xcom_push_result == "byte string container log"
assert xcom_all_result == ["container log 😁", "byte string container log"]
assert no_xcom_push_result is None
def test_execute_xcom_behavior_bytes(self):
self.log_messages = [b"container log 1 ", b"container log 2"]
self.client_mock.pull.return_value = [b'{"status":"pull log"}']
self.client_mock.attach.return_value = iter([b"container log 1 ", b"container log 2"])
# Make sure the logs side effect is updated after the change
self.client_mock.attach.side_effect = (
lambda **kwargs: iter(self.log_messages[-kwargs["tail"] :])
if "tail" in kwargs
else iter(self.log_messages)
)
kwargs = {
"api_version": "1.19",
"command": "env",
"environment": {"UNIT": "TEST"},
"private_environment": {"PRIVATE": "MESSAGE"},
"image": "ubuntu:latest",
"network_mode": "bridge",
"owner": "unittest",
"task_id": "unittest",
"mounts": [Mount(source="/host/path", target="/container/path", type="bind")],
"working_dir": "/container/path",
"shm_size": 1000,
"host_tmp_dir": "/host/airflow",
"container_name": "test_container",
"tty": True,
}
xcom_push_operator = DockerOperator(**kwargs, do_xcom_push=True, xcom_all=False)
xcom_all_operator = DockerOperator(**kwargs, do_xcom_push=True, xcom_all=True)
no_xcom_push_operator = DockerOperator(**kwargs, do_xcom_push=False)
xcom_push_result = xcom_push_operator.execute(None)
xcom_all_result = xcom_all_operator.execute(None)
no_xcom_push_result = no_xcom_push_operator.execute(None)
# Those values here are different than log above as they are from setup
assert xcom_push_result == "container log 2"
assert xcom_all_result == ["container log 1", "container log 2"]
assert no_xcom_push_result is None
def test_execute_xcom_behavior_no_result(self):
self.log_messages = []
self.client_mock.pull.return_value = [b'{"status":"pull log"}']
self.client_mock.attach.return_value = iter([])
kwargs = {
"api_version": "1.19",
"command": "env",
"environment": {"UNIT": "TEST"},
"private_environment": {"PRIVATE": "MESSAGE"},
"image": "ubuntu:latest",
"network_mode": "bridge",
"owner": "unittest",
"task_id": "unittest",
"mounts": [Mount(source="/host/path", target="/container/path", type="bind")],
"working_dir": "/container/path",
"shm_size": 1000,
"host_tmp_dir": "/host/airflow",
"container_name": "test_container",
"tty": True,
}
xcom_push_operator = DockerOperator(**kwargs, do_xcom_push=True, xcom_all=False)
xcom_all_operator = DockerOperator(**kwargs, do_xcom_push=True, xcom_all=True)
no_xcom_push_operator = DockerOperator(**kwargs, do_xcom_push=False)
xcom_push_result = xcom_push_operator.execute(None)
xcom_all_result = xcom_all_operator.execute(None)
no_xcom_push_result = no_xcom_push_operator.execute(None)
assert xcom_push_result is None
assert xcom_all_result is None
assert no_xcom_push_result is None
def test_extra_hosts(self):
hosts_obj = mock.Mock()
operator = DockerOperator(task_id="test", image="test", extra_hosts=hosts_obj)
operator.execute(None)
self.client_mock.create_container.assert_called_once()
assert "host_config" in self.client_mock.create_container.call_args.kwargs
assert "extra_hosts" in self.client_mock.create_host_config.call_args.kwargs
assert hosts_obj is self.client_mock.create_host_config.call_args.kwargs["extra_hosts"]
def test_privileged(self):
privileged = mock.Mock()
operator = DockerOperator(task_id="test", image="test", privileged=privileged)
operator.execute(None)
self.client_mock.create_container.assert_called_once()
assert "host_config" in self.client_mock.create_container.call_args.kwargs
assert "privileged" in self.client_mock.create_host_config.call_args.kwargs
assert privileged is self.client_mock.create_host_config.call_args.kwargs["privileged"]
def test_port_bindings(self):
port_bindings = {8000: 8080}
operator = DockerOperator(task_id="test", image="test", port_bindings=port_bindings)
operator.execute(None)
self.client_mock.create_container.assert_called_once()
assert "host_config" in self.client_mock.create_container.call_args.kwargs
assert "port_bindings" in self.client_mock.create_host_config.call_args.kwargs
assert port_bindings == self.client_mock.create_host_config.call_args.kwargs["port_bindings"]
def test_ulimits(self):
ulimits = [Ulimit(name="nofile", soft=1024, hard=2048)]
operator = DockerOperator(task_id="test", image="test", ulimits=ulimits)
operator.execute(None)
self.client_mock.create_container.assert_called_once()
assert "host_config" in self.client_mock.create_container.call_args.kwargs
assert "ulimits" in self.client_mock.create_host_config.call_args.kwargs
assert ulimits == self.client_mock.create_host_config.call_args.kwargs["ulimits"]
@pytest.mark.parametrize(
"auto_remove",
["True", "false", pytest.param(None, id="none"), pytest.param(None, id="empty"), "here-and-now"],
)
def test_auto_remove_invalid(self, auto_remove):
with pytest.raises(ValueError, match="Invalid `auto_remove` value"):
DockerOperator(task_id="test", image="test", auto_remove=auto_remove)
def test_respect_docker_host_env(self, monkeypatch):
monkeypatch.setenv("DOCKER_HOST", "tcp://docker-host-from-env:2375")
operator = DockerOperator(task_id="test", image="test")
assert operator.docker_url == "tcp://docker-host-from-env:2375"
def test_docker_host_env_empty(self, monkeypatch):
monkeypatch.setenv("DOCKER_HOST", "")
operator = DockerOperator(task_id="test", image="test")
# The docker CLI ignores the empty string and defaults to unix://var/run/docker.sock
# We want to ensure the same behavior.
assert operator.docker_url == "unix://var/run/docker.sock"
def test_docker_host_env_unset(self, monkeypatch):
monkeypatch.delenv("DOCKER_HOST", raising=False)
operator = DockerOperator(task_id="test", image="test")
assert operator.docker_url == "unix://var/run/docker.sock"
@pytest.mark.parametrize(
("log_lines", "expected_lines"),
[
pytest.param(
[
"return self.main(*args, **kwargs)",
" ^^^^^^^^^^^^^^^^",
],
[
"return self.main(*args, **kwargs)",
" ^^^^^^^^^^^^^^^^",
],
id="should-not-remove-leading-spaces",
),
pytest.param(
[
" ^^^^^^^^^^^^^^^^ ",
],
[
" ^^^^^^^^^^^^^^^^",
],
id="should-remove-trailing-spaces",
),
],
)
@mock.patch("logging.Logger")
def test_fetch_logs(self, logger_mock, log_lines, expected_lines):
fetch_logs(log_lines, logger_mock)
assert logger_mock.info.call_args_list == [call("%s", line) for line in expected_lines]
@pytest.mark.parametrize("labels", ({"key": "value"}, ["key=value"]))
def test_labels(self, labels: dict[str, str] | list[str]):
operator = DockerOperator(task_id="test", image="test", labels=labels)
operator.execute({})
self.client_mock.create_container.assert_called_once()
assert "labels" in self.client_mock.create_container.call_args.kwargs
assert labels == self.client_mock.create_container.call_args.kwargs["labels"]
@pytest.mark.db_test
def test_basic_docker_operator_with_template_fields(self, dag_maker):
from docker.types import Mount
with dag_maker():
operator = DockerOperator(
task_id="test",
image="test",
container_name="python_{{dag_run.dag_id}}",
mounts=[Mount(source="workspace", target="/{{task_instance.run_id}}")],
)
operator.execute({})
dr = dag_maker.create_dagrun()
ti = dr.task_instances[0]
rendered = ti.render_templates()
assert rendered.container_name == f"python_{dr.dag_id}"
assert rendered.mounts[0]["Target"] == f"/{ti.run_id}"
|
TestDockerOperator
|
python
|
django__django
|
tests/test_client_regress/tests.py
|
{
"start": 42187,
"end": 44174
}
|
class ____(SimpleTestCase):
def test_get(self):
"Request a view via request method GET"
response = self.client.get("/request_methods/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"request method: GET")
def test_post(self):
"Request a view via request method POST"
response = self.client.post("/request_methods/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"request method: POST")
def test_head(self):
"Request a view via request method HEAD"
response = self.client.head("/request_methods/")
self.assertEqual(response.status_code, 200)
# A HEAD request doesn't return any content.
self.assertNotEqual(response.content, b"request method: HEAD")
self.assertEqual(response.content, b"")
def test_options(self):
"Request a view via request method OPTIONS"
response = self.client.options("/request_methods/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"request method: OPTIONS")
def test_put(self):
"Request a view via request method PUT"
response = self.client.put("/request_methods/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"request method: PUT")
def test_delete(self):
"Request a view via request method DELETE"
response = self.client.delete("/request_methods/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"request method: DELETE")
def test_patch(self):
"Request a view via request method PATCH"
response = self.client.patch("/request_methods/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b"request method: PATCH")
@override_settings(ROOT_URLCONF="test_client_regress.urls")
|
RequestMethodTests
|
python
|
zarr-developers__zarr-python
|
src/zarr/codecs/numcodecs/_codecs.py
|
{
"start": 8251,
"end": 8609
}
|
class ____(_NumcodecsArrayArrayCodec, codec_name="delta"):
def resolve_metadata(self, chunk_spec: ArraySpec) -> ArraySpec:
if astype := self.codec_config.get("astype"):
dtype = parse_dtype(np.dtype(astype), zarr_format=3) # type: ignore[call-overload]
return replace(chunk_spec, dtype=dtype)
return chunk_spec
|
Delta
|
python
|
pytorch__pytorch
|
torch/nn/cpp.py
|
{
"start": 1369,
"end": 3100
}
|
class ____(nn.Module):
"""A subclass of ``torch.nn.Module`` that wraps a C++ frontend module and delegates all access."""
def __init__(self, cpp_module) -> None:
# Assign before the super class constructor so ``self.training`` can be
# assigned to in the super class constructor.
self.cpp_module = cpp_module
super().__init__()
self._parameters = OrderedDictWrapper(cpp_module, "_parameters") # type: ignore[assignment]
self._buffers: OrderedDictWrapper = OrderedDictWrapper(cpp_module, "_buffers") # type: ignore[assignment]
self._modules: OrderedDictWrapper = OrderedDictWrapper(cpp_module, "_modules") # type: ignore[assignment]
for attr in dir(cpp_module):
# Skip magic methods and the three attributes above.
if not attr.startswith("_"):
setattr(self, attr, getattr(self.cpp_module, attr))
def _apply(self, fn, recurse=True):
for param in self.parameters():
# Tensors stored in modules are graph leaves, and we don't
# want to create copy nodes, so we have to unpack the data.
param.data = fn(param.data)
if param._grad is not None:
param._grad.data = fn(param._grad.data)
for buf in self.buffers():
buf.data = fn(buf.data)
return self
# nn.Module defines training as a boolean
@property # type: ignore[override]
# pyrefly: ignore [bad-override]
def training(self):
return self.cpp_module.training
@training.setter
def training(self, mode) -> None:
self.cpp_module.train(mode)
def __repr__(self) -> str:
return self.cpp_module.__repr__()
|
ModuleWrapper
|
python
|
huggingface__transformers
|
tests/models/luke/test_tokenization_luke.py
|
{
"start": 983,
"end": 5205
}
|
class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "studio-ousia/luke-base"
tokenizer_class = LukeTokenizer
from_pretrained_kwargs = {"cls_token": "<s>"}
integration_expected_tokens = ['This', 'Ġis', 'Ġa', 'Ġtest', 'ĠðŁĺ', 'Ĭ', 'Ċ', 'I', 'Ġwas', 'Ġborn', 'Ġin', 'Ġ92', '000', ',', 'Ġand', 'Ġthis', 'Ġis', 'Ġfals', 'é', '.', 'Ċ', 'çĶŁ', 'æ', '´', '»', 'çļĦ', 'çľ', 'Ł', 'è', '°', 'Ľ', 'æĺ¯', 'Ċ', 'Hi', 'Ġ', 'ĠHello', 'Ċ', 'Hi', 'Ġ', 'Ġ', 'ĠHello', 'ĊĊ', 'Ġ', 'Ċ', 'Ġ', 'Ġ', 'Ċ', 'ĠHello', 'Ċ', '<s>', 'Ċ', 'hi', '<s>', 'there', 'Ċ', 'The', 'Ġfollowing', 'Ġstring', 'Ġshould', 'Ġbe', 'Ġproperly', 'Ġencoded', ':', 'ĠHello', '.', 'Ċ', 'But', 'Ġ', 'ird', 'Ġand', 'Ġ', 'à¸', 'Ľ', 'à¸', 'µ', 'Ġ', 'Ġ', 'Ġ', 'ird', 'Ġ', 'Ġ', 'Ġ', 'à¸', 'Ķ', 'Ċ', 'Hey', 'Ġhow', 'Ġare', 'Ġyou', 'Ġdoing'] # fmt: skip
integration_expected_token_ids = [713, 16, 10, 1296, 17841, 27969, 50118, 100, 21, 2421, 11, 8403, 151, 6, 8, 42, 16, 22461, 1140, 4, 50118, 48998, 37127, 20024, 2023, 44574, 49122, 4333, 36484, 7487, 3726, 48569, 50118, 30086, 1437, 20920, 50118, 30086, 1437, 1437, 20920, 50140, 1437, 50118, 1437, 1437, 50118, 20920, 50118, 0, 50118, 3592, 0, 8585, 50118, 133, 511, 6755, 197, 28, 5083, 45320, 35, 20920, 4, 50118, 1708, 1437, 8602, 8, 1437, 24107, 3726, 24107, 8906, 1437, 1437, 1437, 8602, 1437, 1437, 1437, 24107, 10674, 50118, 13368, 141, 32, 47, 608] # fmt: skip
expected_tokens_from_ids = ['This', 'Ġis', 'Ġa', 'Ġtest', 'ĠðŁĺ', 'Ĭ', 'Ċ', 'I', 'Ġwas', 'Ġborn', 'Ġin', 'Ġ92', '000', ',', 'Ġand', 'Ġthis', 'Ġis', 'Ġfals', 'é', '.', 'Ċ', 'çĶŁ', 'æ', '´', '»', 'çļĦ', 'çľ', 'Ł', 'è', '°', 'Ľ', 'æĺ¯', 'Ċ', 'Hi', 'Ġ', 'ĠHello', 'Ċ', 'Hi', 'Ġ', 'Ġ', 'ĠHello', 'ĊĊ', 'Ġ', 'Ċ', 'Ġ', 'Ġ', 'Ċ', 'ĠHello', 'Ċ', '<s>', 'Ċ', 'hi', '<s>', 'there', 'Ċ', 'The', 'Ġfollowing', 'Ġstring', 'Ġshould', 'Ġbe', 'Ġproperly', 'Ġencoded', ':', 'ĠHello', '.', 'Ċ', 'But', 'Ġ', 'ird', 'Ġand', 'Ġ', 'à¸', 'Ľ', 'à¸', 'µ', 'Ġ', 'Ġ', 'Ġ', 'ird', 'Ġ', 'Ġ', 'Ġ', 'à¸', 'Ķ', 'Ċ', 'Hey', 'Ġhow', 'Ġare', 'Ġyou', 'Ġdoing'] # fmt: skip
integration_expected_decoded_text = "This is a test 😊\nI was born in 92000, and this is falsé.\n生活的真谛是\nHi Hello\nHi Hello\n\n \n \n Hello\n<s>\nhi<s>there\nThe following string should be properly encoded: Hello.\nBut ird and ปี ird ด\nHey how are you doing"
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained("studio-ousia/luke-large")
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_text_from_decode = tokenizer.encode(
"sequence builders", add_special_tokens=True, add_prefix_space=False
)
encoded_pair_from_decode = tokenizer.encode(
"sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
self.assertEqual(encoded_sentence, encoded_text_from_decode)
self.assertEqual(encoded_pair, encoded_pair_from_decode)
def get_clean_sequence(self, tokenizer, max_length=20) -> tuple[str, list]:
txt = "Beyonce lives in Los Angeles"
ids = tokenizer.encode(txt, add_special_tokens=False)
return txt, ids
def test_padding_entity_inputs(self):
tokenizer = self.get_tokenizer()
sentence = "Japanese is an East Asian language spoken by about 128 million people, primarily in Japan."
span = (15, 34)
pad_id = tokenizer.entity_vocab["[PAD]"]
mask_id = tokenizer.entity_vocab["[MASK]"]
encoding = tokenizer([sentence, sentence], entity_spans=[[span], [span, span]], padding=True)
self.assertEqual(encoding["entity_ids"], [[mask_id, pad_id], [mask_id, mask_id]])
# test with a sentence with no entity
encoding = tokenizer([sentence, sentence], entity_spans=[[], [span, span]], padding=True)
self.assertEqual(encoding["entity_ids"], [[pad_id, pad_id], [mask_id, mask_id]])
@slow
@require_torch
|
LukeTokenizerTest
|
python
|
pytorch__pytorch
|
torch/testing/_internal/opinfo/core.py
|
{
"start": 116811,
"end": 124158
}
|
class ____(OpInfo):
"""Early version of a specialized OpInfo for foreach functions
The main differences from the parent class are (a) `dtypes`, `dtypesIfCUDA`, and `dtypesIfROCM`
are set to `get_all_dtypes(include_qint=False)`, and (b) the following arguments.
``supports_alpha_param=True`` means that the function supports a python scalar (``numbers.Number``)
as the last keyword argument such as `_foreach_add`.
``supports_scalar_self_arg=True`` means that the function can take a python scalar as its first argument.
Currently only `_foreach_pow` supports this.
``backward_requires_result=True``, which could sound self-explanatory, means that the function uses
the forward result for its backward computation.
"""
supports_alpha_param: bool = False
supports_scalar_self_arg: bool = False
backward_requires_result: bool = False
def __post_init__(self):
(
foreach_method,
foreach_method_inplace,
torch_ref_method,
torch_ref_inplace,
) = get_foreach_method_names(self.name)
if not self.supports_out:
# note(crcrpar): `foreach_method` for `"zero"` is `None` but `None` would call
# `_getattr_qual` in `OpInfo.__post_init__` which should fail since `_foreach_zero`
# is not defined at the moment. Thus to skip the qualification, set a similar torch
# function.
assert foreach_method is None
assert torch_ref_method is None
foreach_method = foreach_method_inplace
torch_ref_method = torch_ref_inplace
# We disable all complex128 tests internally for foreach due to reported flakiness
# tracked in #139648
supported_dtypes = get_all_dtypes(include_qint=False)
if IS_FBCODE:
supported_dtypes = [
x for x in supported_dtypes if x is not torch.complex128
]
self.dtypes = _dispatch_dtypes(supported_dtypes)
self.op = foreach_method
self.method_variant = foreach_method
self.ref = torch_ref_method
self.inplace_variant = foreach_method_inplace
self.ref_inplace = torch_ref_inplace
self.has_no_in_place = self.inplace_variant is None
name = self.name
self.name = f"_foreach_{name}"
if name == "norm":
self.ref = torch.linalg.vector_norm
elif name == "minimum":
# because minimum ref does not support inplace or scalar
self.ref = torch.clamp_max
self.ref_inplace = torch.Tensor.clamp_max_
elif name == "maximum":
# because maximum ref does not support inplace or scalar
self.ref = torch.clamp_min
self.ref_inplace = torch.Tensor.clamp_min_
# The following sets `dtypesIfCUDA` and `dtypesIfROCM` accordingly.
super().__post_init__()
def sample_zero_size_inputs(self, device, dtype, requires_grad=False, **kwargs):
if not hasattr(self.sample_inputs_func, "sample_zero_size_tensor_inputs"):
return []
return self.sample_inputs_func.sample_zero_size_tensor_inputs(
self, device, dtype, requires_grad, **kwargs
)
def gradcheck_wrapper_hermitian_input(op, input, *args, **kwargs):
"""Gradcheck wrapper for functions that take Hermitian matrices as input.
They require a modified function because the finite-difference algorithm
for calculating derivatives does not preserve the Hermitian property of the input.
"""
return op(input + input.mH, *args, **kwargs)
def gradcheck_wrapper_ctc_loss(op, input, *args, **kwargs):
"""Gradcheck wrapper for ctc loss to project onto log-simplex space."""
# See https://github.com/pytorch/pytorch/issues/52241
return op(input.log_softmax(dim=2), *args, **kwargs)
def gradcheck_wrapper_triangular_input(op, *args, upper=False, idx=0, **kwargs):
"""Gradcheck wrapper for functions that take lower or upper triangular matrices as input.
They require a modified function because the finite-difference algorithm
for calculating derivatives does not preserve the triangular property of the input.
`idx` is used to specific which `args[idx]` is to be triangularized.
"""
triangular_arg = args[idx].triu() if upper else args[idx].tril()
return op(*args[:idx], triangular_arg, *args[idx + 1 :], upper, **kwargs)
def gradcheck_wrapper_triangular_input_real_positive_diagonal(
op, *args, upper=False, idx=0, **kwargs
):
"""Gradcheck wrapper for functions that take lower/upper triangular matrices
with real and positive diagonals, for example, cholesky-like operations.
"""
arg = args[idx]
arg_diag = arg.diagonal(0, -2, -1)
arg_diag_embed = torch.diag_embed(arg_diag)
id_diag_tensor = torch.ones_like(arg_diag)
id_tensor = torch.diag_embed(id_diag_tensor)
# new_arg = arg - diag(arg) + I
new_arg = arg - arg_diag_embed + id_tensor
return gradcheck_wrapper_triangular_input(
op, *args[:idx], new_arg, *args[idx + 1 :], upper=upper, idx=idx, **kwargs
)
def gradcheck_wrapper_masked_operation(op, input, *args, **kwargs):
"""Gradcheck wrapper for masked operations.
When mask is specified, replaces masked-out elements with zeros.
Use for operations that produce non-finite masked-out elements,
for instance, for minimum and maximum reductions.
"""
output = op(input, *args, **kwargs)
mask = kwargs.get("mask")
if mask is not None:
output_mask = torch.masked._output_mask(op, input, *args, **kwargs)
output = torch.where(output_mask, output, output.new_zeros([]))
return output
def gradcheck_wrapper_masked_pointwise_operation(op, input, *args, **kwargs):
"""Gradcheck wrapper for masked pointwise operations. Assumes that the result
will be masked iff both tensors are masked at a specific index
When mask is specified, replaces masked-out elements with zeros.
Use for operations that produce non-finite masked-out elements,
for instance, for minimum and maximum reductions.
"""
output = op(input, *args, **kwargs)
input_mask = kwargs.get("input_mask")
other_mask = kwargs.get("other_mask")
if input_mask is not None and other_mask is not None:
combined_mask = torch.logical_and(input_mask, other_mask)
new_kwargs = dict(mask=combined_mask, **kwargs)
output_mask = torch.masked._input_mask(input, *args, **new_kwargs)
output = torch.where(output_mask, output, output.new_zeros([]))
return output
def clone_sample(sample, **kwargs):
"""
Given a SampleInput, this function analyzes its input, args and kwargs,
and produces a copy with each non-Tensor entry being copied by reference,
and with each Tensor entry cloned with `t.clone().requires_grad_(t.requires_grad)`
"""
def clone_tensor(t):
if isinstance(t, torch.Tensor):
return t.detach().clone().requires_grad_(t.requires_grad)
else:
return t
sample_kwargs = kwargs if kwargs else sample.kwargs
return SampleInput(
clone_tensor(sample.input),
args=tuple(map(clone_tensor, sample.args)),
kwargs={k: clone_tensor(v) for k, v in sample_kwargs.items()},
)
|
ForeachFuncInfo
|
python
|
google__pytype
|
pytype/state_test.py
|
{
"start": 534,
"end": 951
}
|
class ____:
def __init__(self, name, true_compat, false_compat):
self._name = name
self.compatible = {True: true_compat, False: false_compat}
def __str__(self):
return self._name
ONLY_TRUE = FakeValue("T", True, False)
ONLY_FALSE = FakeValue("F", False, True)
AMBIGUOUS = FakeValue("?", True, True)
def fake_compatible_with(value, logical_value):
return value.compatible[logical_value]
|
FakeValue
|
python
|
justquick__django-activity-stream
|
actstream/managers.py
|
{
"start": 4052,
"end": 6829
}
|
class ____(GFKManager):
    """
    Manager providing follow/follower queries for the Follow model.
    """
    def for_object(self, instance, flag=''):
        """
        Restrict the queryset to follows of a single object instance.
        """
        check(instance)
        ctype_pk = ContentType.objects.get_for_model(instance).pk
        qs = self.filter(content_type=ctype_pk, object_id=instance.pk)
        return qs.filter(flag=flag) if flag else qs
    def is_following(self, user, instance, flag=''):
        """
        Return True if ``user`` follows ``instance`` (optionally with ``flag``).
        """
        # Anonymous or missing users never follow anything.
        if not user or user.is_anonymous:
            return False
        qs = self.for_object(instance)
        if flag:
            qs = qs.filter(flag=flag)
        return qs.filter(user=user).exists()
    def followers_qs(self, actor, flag=''):
        """
        Queryset of Follow rows whose users follow ``actor`` (eg my followers).
        """
        check(actor)
        actor_type = ContentType.objects.get_for_model(actor)
        qs = self.filter(
            content_type=actor_type,
            object_id=actor.pk
        ).select_related('user')
        return qs.filter(flag=flag) if flag else qs
    def followers(self, actor, flag=''):
        """
        List of User objects who are following ``actor`` (eg my followers).
        """
        ids = self.followers_qs(actor, flag=flag).values_list('user', flat=True)
        return get_user_model().objects.filter(id__in=ids)
    def following_qs(self, user, *models, **kwargs):
        """
        Queryset of actors that ``user`` is following (eg who im following).

        Results may be of any model unless restricted via ``models``;
        eg following(user, User) only returns users the given user follows.
        """
        queryset = self.filter(user=user)
        # OR together one content-type filter per restricting model.
        model_filter = Q()
        for candidate in models:
            check(candidate)
            model_filter |= Q(content_type=ContentType.objects.get_for_model(candidate))
        queryset = queryset.filter(model_filter)
        flag = kwargs.get('flag', '')
        if flag:
            queryset = queryset.filter(flag=flag)
        return queryset.fetch_generic_relations('follow_object')
    def following(self, user, *models, **kwargs):
        """
        List of actors that ``user`` is following (eg who im following).

        Results may be of any model unless restricted via ``models``;
        eg following(user, User) only returns users the given user follows.
        """
        follows = self.following_qs(user, *models, flag=kwargs.get('flag', ''))
        return [entry.follow_object for entry in follows]
|
FollowManager
|
python
|
PyCQA__pydocstyle
|
src/pydocstyle/checker.py
|
{
"start": 866,
"end": 47225
}
|
class ____:
"""Checker for PEP 257, NumPy and Google conventions.
D10x: Missing docstrings
D20x: Whitespace issues
D30x: Docstring formatting
D40x: Docstring content issues
"""
NUMPY_SECTION_NAMES = (
'Short Summary',
'Extended Summary',
'Parameters',
'Returns',
'Yields',
'Other Parameters',
'Raises',
'See Also',
'Notes',
'References',
'Examples',
'Attributes',
'Methods',
)
GOOGLE_SECTION_NAMES = (
'Args',
'Arguments',
'Attention',
'Attributes',
'Caution',
'Danger',
'Error',
'Example',
'Examples',
'Hint',
'Important',
'Keyword Args',
'Keyword Arguments',
'Methods',
'Note',
'Notes',
'Return',
'Returns',
'Raises',
'References',
'See Also',
'Tip',
'Todo',
'Warning',
'Warnings',
'Warns',
'Yield',
'Yields',
)
# Examples that will be matched -
# " random: Test" where random will be captured as the param
# " random : test" where random will be captured as the param
# " random_t (Test) : test " where random_t will be captured as the param
# Matches anything that fulfills all the following conditions:
GOOGLE_ARGS_REGEX = re(
# Begins with 0 or more whitespace characters
r"^\s*"
# Followed by 1 or more unicode chars, numbers or underscores
# The below is captured as the first group as this is the parameter name.
r"(\w+)"
# Followed by 0 or more whitespace characters
r"\s*"
# Matches patterns contained within round brackets.
# The `.*?`matches any sequence of characters in a non-greedy
# way (denoted by the `*?`)
r"(\(.*?\))?"
# Followed by 0 or more whitespace chars
r"\s*"
# Followed by a colon
r":"
# Might have a new line and leading whitespace
r"\n?\s*"
# Followed by 1 or more characters - which is the docstring for the parameter
".+"
)
SPHINX_ARGS_REGEX = re(
# Begins with 0 or more whitespace characters
r"^\s*"
# Followed by the parameter marker
r":param "
# Followed by 1 or more unicode chars, numbers or underscores and a colon
# The parameter name is captured as the first group.
r"(\w+):"
# Followed by 0 or more whitespace characters
r"\s*"
# Next is the parameter description
r".+$"
)
def check_source(
self,
source,
filename,
ignore_decorators=None,
property_decorators=None,
ignore_inline_noqa=False,
ignore_self_only_init=False,
):
self.property_decorators = (
{} if property_decorators is None else property_decorators
)
self.ignore_self_only_init = ignore_self_only_init
module = parse(StringIO(source), filename)
for definition in module:
for this_check in self.checks:
terminate = False
if isinstance(definition, this_check._check_for):
skipping_all = definition.skipped_error_codes == 'all'
decorator_skip = ignore_decorators is not None and any(
len(ignore_decorators.findall(dec.name)) > 0
for dec in definition.decorators
)
if (
ignore_inline_noqa or not skipping_all
) and not decorator_skip:
error = this_check(
self, definition, definition.docstring
)
else:
error = None
errors = error if hasattr(error, '__iter__') else [error]
for error in errors:
if error is not None and (
ignore_inline_noqa
or error.code not in definition.skipped_error_codes
):
partition = this_check.__doc__.partition('.\n')
message, _, explanation = partition
error.set_context(
explanation=explanation, definition=definition
)
yield error
if this_check._terminal:
terminate = True
break
if terminate:
break
@property
def checks(self):
all = [
this_check
for this_check in vars(type(self)).values()
if hasattr(this_check, '_check_for')
]
return sorted(all, key=lambda this_check: not this_check._terminal)
@check_for(Definition, terminal=True)
def check_docstring_missing(self, definition, docstring):
"""D10{0,1,2,3}: Public definitions should have docstrings.
All modules should normally have docstrings. [...] all functions and
classes exported by a module should also have docstrings. Public
methods (including the __init__ constructor) should also have
docstrings.
Note: Public (exported) definitions are either those with names listed
in __all__ variable (if present), or those that do not start
with a single underscore.
"""
def method_violation():
if definition.is_magic:
return violations.D105()
if definition.is_init:
if (
self.ignore_self_only_init
and len(definition.param_names) == 1
):
return None
return violations.D107()
if not definition.is_overload:
return violations.D102()
return None
if not docstring and definition.is_public:
codes = {
Module: violations.D100,
Class: violations.D101,
NestedClass: violations.D106,
Method: method_violation,
NestedFunction: violations.D103,
Function: (
lambda: violations.D103()
if not definition.is_overload
else None
),
Package: violations.D104,
}
return codes[type(definition)]()
@check_for(Definition, terminal=True)
def check_docstring_empty(self, definition, docstring):
"""D419: Docstring is empty.
If the user provided a docstring but it was empty, it is like they never provided one.
NOTE: This used to report as D10X errors.
"""
if docstring and is_blank(ast.literal_eval(docstring)):
return violations.D419()
@check_for(Definition)
def check_one_liners(self, definition, docstring):
"""D200: One-liner docstrings should fit on one line with quotes.
The closing quotes are on the same line as the opening quotes.
This looks better for one-liners.
"""
if docstring:
lines = ast.literal_eval(docstring).split('\n')
if len(lines) > 1:
non_empty_lines = sum(1 for l in lines if not is_blank(l))
if non_empty_lines == 1:
return violations.D200(len(lines))
@check_for(Function)
def check_no_blank_before(self, function, docstring): # def
"""D20{1,2}: No blank lines allowed around function/method docstring.
There's no blank line either before or after the docstring unless directly
followed by an inner function or class.
"""
if docstring:
before, _, after = function.source.partition(docstring)
blanks_before = list(map(is_blank, before.split('\n')[:-1]))
blanks_after = list(map(is_blank, after.split('\n')[1:]))
blanks_before_count = sum(takewhile(bool, reversed(blanks_before)))
blanks_after_count = sum(takewhile(bool, blanks_after))
if blanks_before_count != 0:
yield violations.D201(blanks_before_count)
if not all(blanks_after) and blanks_after_count != 0:
# Report a D202 violation if the docstring is followed by a blank line
# and the blank line is not itself followed by an inner function or
# class.
if not (
blanks_after_count == 1
and re(r"\s+(?:(?:class|def|async def)\s|@)").match(after)
):
yield violations.D202(blanks_after_count)
@check_for(Class)
def check_blank_before_after_class(self, class_, docstring):
"""D20{3,4}: Class docstring should have 1 blank line around them.
Insert a blank line before and after all docstrings (one-line or
multi-line) that document a class -- generally speaking, the class's
methods are separated from each other by a single blank line, and the
docstring needs to be offset from the first method by a blank line;
for symmetry, put a blank line between the class header and the
docstring.
"""
# NOTE: this gives false-positive in this case
# class Foo:
#
# """Docstring."""
#
#
# # comment here
# def foo(): pass
if docstring:
before, _, after = class_.source.partition(docstring)
blanks_before = list(map(is_blank, before.split('\n')[:-1]))
blanks_after = list(map(is_blank, after.split('\n')[1:]))
blanks_before_count = sum(takewhile(bool, reversed(blanks_before)))
blanks_after_count = sum(takewhile(bool, blanks_after))
if blanks_before_count != 0:
yield violations.D211(blanks_before_count)
if blanks_before_count != 1:
yield violations.D203(blanks_before_count)
if not all(blanks_after) and blanks_after_count != 1:
yield violations.D204(blanks_after_count)
@check_for(Definition)
def check_blank_after_summary(self, definition, docstring):
"""D205: Put one blank line between summary line and description.
Multi-line docstrings consist of a summary line just like a one-line
docstring, followed by a blank line, followed by a more elaborate
description. The summary line may be used by automatic indexing tools;
it is important that it fits on one line and is separated from the
rest of the docstring by a blank line.
"""
if docstring:
lines = ast.literal_eval(docstring).strip().split('\n')
if len(lines) > 1:
post_summary_blanks = list(map(is_blank, lines[1:]))
blanks_count = sum(takewhile(bool, post_summary_blanks))
if blanks_count != 1:
return violations.D205(blanks_count)
@staticmethod
def _get_docstring_indent(definition, docstring):
"""Return the indentation of the docstring's opening quotes."""
before_docstring, _, _ = definition.source.partition(docstring)
_, _, indent = before_docstring.rpartition('\n')
return indent
@check_for(Definition)
def check_indent(self, definition, docstring):
"""D20{6,7,8}: The entire docstring should be indented same as code.
The entire docstring is indented the same as the quotes at its
first line.
"""
if docstring:
indent = self._get_docstring_indent(definition, docstring)
lines = docstring.split('\n')
if len(lines) > 1:
# First line and line continuations need no indent.
lines = [
line
for i, line in enumerate(lines)
if i and not lines[i - 1].endswith('\\')
]
indents = [leading_space(l) for l in lines if not is_blank(l)]
if set(' \t') == set(''.join(indents) + indent):
yield violations.D206()
if (len(indents) > 1 and min(indents[:-1]) > indent) or (
len(indents) > 0 and indents[-1] > indent
):
yield violations.D208()
if len(indents) > 0 and min(indents) < indent:
yield violations.D207()
@check_for(Definition)
def check_newline_after_last_paragraph(self, definition, docstring):
"""D209: Put multi-line docstring closing quotes on separate line.
Unless the entire docstring fits on a line, place the closing
quotes on a line by themselves.
"""
if docstring:
lines = [
l
for l in ast.literal_eval(docstring).split('\n')
if not is_blank(l)
]
if len(lines) > 1:
if docstring.split("\n")[-1].strip() not in ['"""', "'''"]:
return violations.D209()
@check_for(Definition)
def check_surrounding_whitespaces(self, definition, docstring):
"""D210: No whitespaces allowed surrounding docstring text."""
if docstring:
lines = ast.literal_eval(docstring).split('\n')
if (
lines[0].startswith(' ')
or len(lines) == 1
and lines[0].endswith(' ')
):
return violations.D210()
@check_for(Definition)
def check_multi_line_summary_start(self, definition, docstring):
"""D21{2,3}: Multi-line docstring summary style check.
A multi-line docstring summary should start either at the first,
or separately at the second line of a docstring.
"""
if docstring:
start_triple = [
'"""',
"'''",
'u"""',
"u'''",
'r"""',
"r'''",
'ur"""',
"ur'''",
]
lines = ast.literal_eval(docstring).split('\n')
if len(lines) > 1:
first = docstring.split("\n")[0].strip().lower()
if first in start_triple:
return violations.D212()
else:
return violations.D213()
@check_for(Definition)
def check_triple_double_quotes(self, definition, docstring):
r'''D300: Use """triple double quotes""".
For consistency, always use """triple double quotes""" around
docstrings. Use r"""raw triple double quotes""" if you use any
backslashes in your docstrings. For Unicode docstrings, use
u"""Unicode triple-quoted strings""".
Note: Exception to this is made if the docstring contains
""" quotes in its body.
'''
if docstring:
if '"""' in ast.literal_eval(docstring):
# Allow ''' quotes if docstring contains """, because
# otherwise """ quotes could not be expressed inside
# docstring. Not in PEP 257.
regex = re(r"[uU]?[rR]?'''[^'].*")
else:
regex = re(r'[uU]?[rR]?"""[^"].*')
if not regex.match(docstring):
illegal_matcher = re(r"""[uU]?[rR]?("+|'+).*""")
illegal_quotes = illegal_matcher.match(docstring).group(1)
return violations.D300(illegal_quotes)
@check_for(Definition)
def check_backslashes(self, definition, docstring):
r'''D301: Use r""" if any backslashes in a docstring.
Use r"""raw triple double quotes""" if you use any backslashes
(\) in your docstrings.
Exceptions are backslashes for line-continuation and unicode escape
sequences \N... and \u... These are considered intended unescaped
content in docstrings.
'''
# Just check that docstring is raw, check_triple_double_quotes
# ensures the correct quotes.
if (
docstring
and re(r'\\[^\nuN]').search(docstring)
and not docstring.startswith(('r', 'ur'))
):
return violations.D301()
@staticmethod
def _check_ends_with(docstring, chars, violation):
"""First line ends with one of `chars`.
First line of the docstring should end with one of the characters in `chars`.
`chars` supports either a `str` or an `Iterable[str]`. If the condition is
evaluated to be false, it raises `violation`.
"""
if docstring:
summary_line = ast.literal_eval(docstring).strip().split('\n')[0]
if not summary_line.endswith(chars):
return violation(summary_line[-1])
@check_for(Definition)
def check_ends_with_period(self, definition, docstring):
"""D400: First line should end with a period.
The [first line of a] docstring is a phrase ending in a period.
"""
return self._check_ends_with(docstring, '.', violations.D400)
@check_for(Definition)
def check_ends_with_punctuation(self, definition, docstring):
"""D415: should end with proper punctuation.
The [first line of a] docstring is a phrase ending in a period,
question mark, or exclamation point
"""
return self._check_ends_with(
docstring, ('.', '!', '?'), violations.D415
)
@check_for(Function)
def check_imperative_mood(self, function, docstring): # def context
"""D401: First line should be in imperative mood: 'Do', not 'Does'.
[Docstring] prescribes the function or method's effect as a command:
("Do this", "Return that"), not as a description; e.g. don't write
"Returns the pathname ...".
"""
if (
docstring
and not function.is_test
and not function.is_property(self.property_decorators)
):
stripped = ast.literal_eval(docstring).strip()
if stripped:
first_word = strip_non_alphanumeric(stripped.split()[0])
check_word = first_word.lower()
if check_word in IMPERATIVE_BLACKLIST:
return violations.D401b(first_word)
correct_forms = IMPERATIVE_VERBS.get(stem(check_word))
if correct_forms and check_word not in correct_forms:
best = max(
correct_forms,
key=lambda f: common_prefix_length(check_word, f),
)
return violations.D401(best.capitalize(), first_word)
@check_for(Function)
def check_no_signature(self, function, docstring): # def context
"""D402: First line should not be function's or method's "signature".
The one-line docstring should NOT be a "signature" reiterating the
function/method parameters (which can be obtained by introspection).
"""
if docstring:
first_line = ast.literal_eval(docstring).strip().split('\n')[0]
if function.name + '(' in first_line.replace(' ', ''):
return violations.D402()
@check_for(Function)
def check_capitalized(self, function, docstring):
"""D403: First word of the first line should be properly capitalized.
The [first line of a] docstring is a phrase ending in a period.
"""
if docstring:
first_word = ast.literal_eval(docstring).split()[0]
if first_word == first_word.upper():
return
for char in first_word:
if char not in string.ascii_letters and char != "'":
return
if first_word != first_word.capitalize():
return violations.D403(first_word.capitalize(), first_word)
@check_for(Function)
def check_if_needed(self, function, docstring):
"""D418: Function decorated with @overload shouldn't contain a docstring.
Functions that are decorated with @overload are definitions,
and are for the benefit of the type checker only,
since they will be overwritten by the non-@overload-decorated definition.
"""
if docstring and function.is_overload:
return violations.D418()
@check_for(Definition)
def check_starts_with_this(self, function, docstring):
"""D404: First word of the docstring should not be `This`.
Docstrings should use short, simple language. They should not begin
with "This class is [..]" or "This module contains [..]".
"""
if not docstring:
return
stripped = ast.literal_eval(docstring).strip()
if not stripped:
return
first_word = strip_non_alphanumeric(stripped.split()[0])
if first_word.lower() == 'this':
return violations.D404()
@staticmethod
def _is_docstring_section(context):
"""Check if the suspected context is really a section header.
Lets have a look at the following example docstring:
'''Title.
Some part of the docstring that specifies what the function
returns. <----- Not a real section name. It has a suffix and the
previous line is not empty and does not end with
a punctuation sign.
This is another line in the docstring. It describes stuff,
but we forgot to add a blank line between it and the section name.
Parameters <-- A real section name. The previous line ends with
---------- a period, therefore it is in a new
grammatical context.
param : int
examples : list <------- Not a section - previous line doesn't end
A list of examples. with punctuation.
notes : list <---------- Not a section - there's text after the
A list of notes. colon.
Notes: <--- Suspected as a context because there's a suffix to the
----- section, but it's a colon so it's probably a mistake.
Bla.
'''
To make sure this is really a section we check these conditions:
* There's no suffix to the section name or it's just a colon AND
* The previous line is empty OR it ends with punctuation.
If one of the conditions is true, we will consider the line as
a section name.
"""
section_name_suffix = (
context.line.strip().lstrip(context.section_name.strip()).strip()
)
section_suffix_is_only_colon = section_name_suffix == ':'
punctuation = [',', ';', '.', '-', '\\', '/', ']', '}', ')']
prev_line_ends_with_punctuation = any(
context.previous_line.strip().endswith(x) for x in punctuation
)
this_line_looks_like_a_section_name = (
is_blank(section_name_suffix) or section_suffix_is_only_colon
)
prev_line_looks_like_end_of_paragraph = (
prev_line_ends_with_punctuation or is_blank(context.previous_line)
)
return (
this_line_looks_like_a_section_name
and prev_line_looks_like_end_of_paragraph
)
@classmethod
def _check_blanks_and_section_underline(
cls, section_name, context, indentation
):
"""D4{07,08,09,12,14}, D215: Section underline checks.
Check for correct formatting for docstring sections. Checks that:
* The line that follows the section name contains
dashes (D40{7,8}).
* The amount of dashes is equal to the length of the section
name (D409).
* The section's content does not begin in the line that follows
the section header (D412).
* The section has no content (D414).
* The indentation of the dashed line is equal to the docstring's
indentation (D215).
"""
blank_lines_after_header = 0
for line in context.following_lines:
if not is_blank(line):
break
blank_lines_after_header += 1
else:
# There are only blank lines after the header.
yield violations.D407(section_name)
yield violations.D414(section_name)
return
non_empty_line = context.following_lines[blank_lines_after_header]
dash_line_found = ''.join(set(non_empty_line.strip())) == '-'
if not dash_line_found:
yield violations.D407(section_name)
if blank_lines_after_header > 0:
yield violations.D412(section_name)
else:
if blank_lines_after_header > 0:
yield violations.D408(section_name)
if non_empty_line.strip() != "-" * len(section_name):
yield violations.D409(
len(section_name),
section_name,
len(non_empty_line.strip()),
)
if leading_space(non_empty_line) > indentation:
yield violations.D215(section_name)
line_after_dashes_index = blank_lines_after_header + 1
# If the line index after the dashes is in range (perhaps we have
# a header + underline followed by another section header).
if line_after_dashes_index < len(context.following_lines):
line_after_dashes = context.following_lines[
line_after_dashes_index
]
if is_blank(line_after_dashes):
rest_of_lines = context.following_lines[
line_after_dashes_index:
]
if not is_blank(''.join(rest_of_lines)):
yield violations.D412(section_name)
else:
yield violations.D414(section_name)
else:
yield violations.D414(section_name)
@classmethod
def _check_common_section(
cls, docstring, definition, context, valid_section_names
):
"""D4{05,10,11,13}, D214: Section name checks.
Check for valid section names. Checks that:
* The section name is properly capitalized (D405).
* The section is not over-indented (D214).
* There's a blank line after the section (D410, D413).
* There's a blank line before the section (D411).
Also yields all the errors from `_check_blanks_and_section_underline`.
"""
indentation = cls._get_docstring_indent(definition, docstring)
capitalized_section = context.section_name.title()
if (
context.section_name not in valid_section_names
and capitalized_section in valid_section_names
):
yield violations.D405(capitalized_section, context.section_name)
if leading_space(context.line) > indentation:
yield violations.D214(capitalized_section)
if not context.following_lines or not is_blank(
context.following_lines[-1]
):
if context.is_last_section:
yield violations.D413(capitalized_section)
else:
yield violations.D410(capitalized_section)
if not is_blank(context.previous_line):
yield violations.D411(capitalized_section)
yield from cls._check_blanks_and_section_underline(
capitalized_section, context, indentation
)
@classmethod
def _check_numpy_section(cls, docstring, definition, context):
"""D406: NumPy-style section name checks.
Check for valid section names. Checks that:
* The section name has no superfluous suffix to it (D406).
Additionally, also yield all violations from `_check_common_section`
which are style-agnostic section checks.
"""
indentation = cls._get_docstring_indent(definition, docstring)
capitalized_section = context.section_name.title()
yield from cls._check_common_section(
docstring, definition, context, cls.NUMPY_SECTION_NAMES
)
suffix = context.line.strip().lstrip(context.section_name)
if suffix:
yield violations.D406(capitalized_section, context.line.strip())
if capitalized_section == "Parameters":
yield from cls._check_parameters_section(
docstring, definition, context
)
@staticmethod
def _check_parameters_section(docstring, definition, context):
"""D417: `Parameters` section check for numpy style.
Check for a valid `Parameters` section. Checks that:
* The section documents all function arguments (D417)
except `self` or `cls` if it is a method.
"""
docstring_args = set()
section_level_indent = leading_space(context.line)
# Join line continuations, then resplit by line.
content = (
'\n'.join(context.following_lines).replace('\\\n', '').split('\n')
)
for current_line, next_line in zip(content, content[1:]):
# All parameter definitions in the Numpy parameters
# section must be at the same indent level as the section
# name.
# Also, we ensure that the following line is indented,
# and has some string, to ensure that the parameter actually
# has a description.
# This means, this is a parameter doc with some description
if (
(leading_space(current_line) == section_level_indent)
and (
len(leading_space(next_line))
> len(leading_space(current_line))
)
and next_line.strip()
):
# In case the parameter has type definitions, it
# will have a colon
if ":" in current_line:
parameters, parameter_type = current_line.split(":", 1)
# Else, we simply have the list of parameters defined
# on the current line.
else:
parameters = current_line.strip()
# Numpy allows grouping of multiple parameters of same
# type in the same line. They are comma separated.
parameter_list = parameters.split(",")
for parameter in parameter_list:
docstring_args.add(parameter.strip())
yield from ConventionChecker._check_missing_args(
docstring_args, definition
)
@staticmethod
def _check_args_section(docstring, definition, context):
"""D417: `Args` section checks.
Check for a valid `Args` or `Argument` section. Checks that:
* The section documents all function arguments (D417)
except `self` or `cls` if it is a method.
Documentation for each arg should start at the same indentation
level. For example, in this case x and y are distinguishable::
Args:
x: Lorem ipsum dolor sit amet
y: Ut enim ad minim veniam
In the case below, we only recognize x as a documented parameter
because the rest of the content is indented as if it belongs
to the description for x::
Args:
x: Lorem ipsum dolor sit amet
y: Ut enim ad minim veniam
"""
docstring_args = set()
# normalize leading whitespace
if context.following_lines:
# any lines with shorter indent than the first one should be disregarded
first_line = context.following_lines[0]
leading_whitespaces = first_line[: -len(first_line.lstrip())]
args_content = dedent(
"\n".join(
[
line
for line in context.following_lines
if line.startswith(leading_whitespaces) or line == ""
]
)
).strip()
args_sections = []
for line in args_content.splitlines(keepends=True):
if not line[:1].isspace():
# This line is the start of documentation for the next
# parameter because it doesn't start with any whitespace.
args_sections.append(line)
else:
# This is a continuation of documentation for the last
# parameter because it does start with whitespace.
args_sections[-1] += line
for section in args_sections:
match = ConventionChecker.GOOGLE_ARGS_REGEX.match(section)
if match:
docstring_args.add(match.group(1))
yield from ConventionChecker._check_missing_args(
docstring_args, definition
)
@staticmethod
def _find_sphinx_params(lines):
"""D417: Sphinx param section checks.
Check for a valid Sphinx-style parameter section.
* The section documents all function arguments (D417)
except `self` or `cls` if it is a method.
Documentation for each arg should start at the same indentation
level::
:param x: Lorem ipsum dolor sit amet
:param y: Ut enim ad minim veniam
"""
params = []
for line in lines:
match = ConventionChecker.SPHINX_ARGS_REGEX.match(line)
if match:
params.append(match.group(1))
return params
@staticmethod
def _check_sphinx_params(lines, definition):
"""D417: Sphinx param section checks.
Check for a valid Sphinx-style parameter section.
* The section documents all function arguments (D417)
except `self` or `cls` if it is a method.
Documentation for each arg should start at the same indentation
level. For example, in this case x and y are distinguishable::
:param x: Lorem ipsum dolor sit amet
:param y: Ut enim ad minim veniam
In the case below, we only recognize x as a documented parameter
because the rest of the content is indented as if it belongs
to the description for x::
:param x: Lorem ipsum dolor sit amet
:param y: Ut enim ad minim veniam
"""
docstring_args = set(ConventionChecker._find_sphinx_params(lines))
if docstring_args:
yield from ConventionChecker._check_missing_args(
docstring_args, definition
)
return True
return False
@staticmethod
def _check_missing_args(docstring_args, definition):
"""D417: Yield error for missing arguments in docstring.
Given a list of arguments found in the docstring and the
callable definition, it checks if all the arguments of the
callable are present in the docstring, else it yields a
D417 with a list of missing arguments.
"""
if isinstance(definition, Function):
function_args = get_function_args(definition.source)
# If the method isn't static, then we skip the first
# positional argument as it is `cls` or `self`
if definition.kind == 'method' and not definition.is_static:
function_args = function_args[1:]
# Filtering out any arguments prefixed with `_` marking them
# as private.
function_args = [
arg_name
for arg_name in function_args
if not is_def_arg_private(arg_name)
]
missing_args = set(function_args) - docstring_args
if missing_args:
yield violations.D417(
", ".join(sorted(missing_args)), definition.name
)
@classmethod
def _check_google_section(cls, docstring, definition, context):
"""D416: Google-style section name checks.
Check for valid section names. Checks that:
* The section does not contain any blank line between its name
and content (D412).
* The section is not empty (D414).
* The section name has colon as a suffix (D416).
Additionally, also yield all violations from `_check_common_section`
which are style-agnostic section checks.
"""
capitalized_section = context.section_name.title()
yield from cls._check_common_section(
docstring, definition, context, cls.GOOGLE_SECTION_NAMES
)
suffix = context.line.strip().lstrip(context.section_name)
if suffix != ":":
yield violations.D416(
capitalized_section + ":", context.line.strip()
)
if capitalized_section in ("Args", "Arguments"):
yield from cls._check_args_section(docstring, definition, context)
@staticmethod
def _get_section_contexts(lines, valid_section_names):
"""Generate `SectionContext` objects for valid sections.
Given a list of `valid_section_names`, generate an
`Iterable[SectionContext]` which provides:
* Section Name
* String value of the previous line
* The section line
* Following lines till the next section
* Line index of the beginning of the section in the docstring
* Boolean indicating whether the section is the last section.
for each valid section.
"""
lower_section_names = [s.lower() for s in valid_section_names]
def _suspected_as_section(_line):
result = get_leading_words(_line.lower())
return result in lower_section_names
# Finding our suspects.
suspected_section_indices = [
i for i, line in enumerate(lines) if _suspected_as_section(line)
]
SectionContext = namedtuple(
'SectionContext',
(
'section_name',
'previous_line',
'line',
'following_lines',
'original_index',
'is_last_section',
),
)
# First - create a list of possible contexts. Note that the
# `following_lines` member is until the end of the docstring.
contexts = (
SectionContext(
get_leading_words(lines[i].strip()),
lines[i - 1],
lines[i],
lines[i + 1 :],
i,
False,
)
for i in suspected_section_indices
)
# Now that we have manageable objects - rule out false positives.
contexts = (
c for c in contexts if ConventionChecker._is_docstring_section(c)
)
# Now we shall trim the `following lines` field to only reach the
# next section name.
for a, b in pairwise(contexts, None):
end = -1 if b is None else b.original_index
yield SectionContext(
a.section_name,
a.previous_line,
a.line,
lines[a.original_index + 1 : end],
a.original_index,
b is None,
)
def _check_numpy_sections(self, lines, definition, docstring):
"""NumPy-style docstring sections checks.
Check the general format of a sectioned docstring:
'''This is my one-liner.
Short Summary
-------------
This is my summary.
Returns
-------
None.
'''
Section names appear in `NUMPY_SECTION_NAMES`.
Yields all violation from `_check_numpy_section` for each valid
Numpy-style section.
"""
found_any_numpy_section = False
for ctx in self._get_section_contexts(lines, self.NUMPY_SECTION_NAMES):
found_any_numpy_section = True
yield from self._check_numpy_section(docstring, definition, ctx)
return found_any_numpy_section
def _check_google_sections(self, lines, definition, docstring):
"""Google-style docstring section checks.
Check the general format of a sectioned docstring:
'''This is my one-liner.
Note:
This is my summary.
Returns:
None.
'''
Section names appear in `GOOGLE_SECTION_NAMES`.
Yields all violation from `_check_google_section` for each valid
Google-style section.
"""
for ctx in self._get_section_contexts(
lines, self.GOOGLE_SECTION_NAMES
):
yield from self._check_google_section(docstring, definition, ctx)
@check_for(Definition)
def check_docstring_sections(self, definition, docstring):
"""Check for docstring sections."""
if not docstring:
return
lines = docstring.split("\n")
if len(lines) < 2:
return
found_numpy = yield from self._check_numpy_sections(
lines, definition, docstring
)
if found_numpy:
return
found_sphinx = yield from self._check_sphinx_params(lines, definition)
if found_sphinx:
return
yield from self._check_google_sections(lines, definition, docstring)
parse = Parser()
def check(
filenames,
select=None,
ignore=None,
ignore_decorators=None,
property_decorators=None,
ignore_inline_noqa=False,
ignore_self_only_init=False,
):
"""Generate docstring errors that exist in `filenames` iterable.
By default, the PEP-257 convention is checked. To specifically define the
set of error codes to check for, supply either `select` or `ignore` (but
not both). In either case, the parameter should be a collection of error
code strings, e.g., {'D100', 'D404'}.
When supplying `select`, only specified error codes will be reported.
When supplying `ignore`, all error codes which were not specified will be
reported.
Note that ignored error code refer to the entire set of possible
error codes, which is larger than just the PEP-257 convention. To your
convenience, you may use `pydocstyle.violations.conventions.pep257` as
a base set to add or remove errors from.
`ignore_inline_noqa` controls if `# noqa` comments are respected or not.
`ignore_self_only_init` controls if D107 is reported on __init__ only containing `self`.
Examples
---------
>>> check(['pydocstyle.py'])
<generator object check at 0x...>
>>> check(['pydocstyle.py'], select=['D100'])
<generator object check at 0x...>
>>> check(['pydocstyle.py'], ignore=conventions.pep257 - {'D100'})
<generator object check at 0x...>
"""
if select is not None and ignore is not None:
raise IllegalConfiguration(
'Cannot pass both select and ignore. '
'They are mutually exclusive.'
)
elif select is not None:
checked_codes = select
elif ignore is not None:
checked_codes = list(
set(violations.ErrorRegistry.get_error_codes()) - set(ignore)
)
else:
checked_codes = violations.conventions.pep257
for filename in filenames:
log.info('Checking file %s.', filename)
try:
with tk.open(filename) as file:
source = file.read()
for error in ConventionChecker().check_source(
source,
filename,
ignore_decorators,
property_decorators,
ignore_inline_noqa,
ignore_self_only_init,
):
code = getattr(error, 'code', None)
if code in checked_codes:
yield error
except (OSError, AllError, ParseError) as error:
log.warning('Error in file %s: %s', filename, error)
yield error
except tk.TokenError:
yield SyntaxError('invalid syntax in file %s' % filename)
def is_ascii(string):
"""Return a boolean indicating if `string` only has ascii characters."""
return all(ord(char) < 128 for char in string)
def leading_space(string):
"""Return any leading space from `string`."""
return re(r'\s*').match(string).group()
def get_leading_words(line):
"""Return any leading set of words from `line`.
For example, if `line` is " Hello world!!!", returns "Hello world".
"""
result = re(r"[\w ]+").match(line.strip())
if result is not None:
return result.group()
def is_def_arg_private(arg_name):
"""Return a boolean indicating if the argument name is private."""
return arg_name.startswith("_")
def get_function_args(function_source):
"""Return the function arguments given the source-code string."""
# We are stripping the whitespace from the left of the
# function source.
# This is so that if the docstring has incorrectly
# indented lines, which are at a lower indent than the
# function source, we still dedent the source correctly
# and the AST parser doesn't throw an error.
try:
function_arg_node = ast.parse(function_source.lstrip()).body[0].args
except SyntaxError:
# If we still get a syntax error, we don't want the
# the checker to crash. Instead we just return a blank list.
return []
arg_nodes = function_arg_node.args
kwonly_arg_nodes = function_arg_node.kwonlyargs
return [arg_node.arg for arg_node in chain(arg_nodes, kwonly_arg_nodes)]
|
ConventionChecker
|
python
|
yaml__pyyaml
|
lib/yaml/cyaml.py
|
{
"start": 493,
"end": 692
}
|
class ____(CParser, SafeConstructor, Resolver):
def __init__(self, stream):
CParser.__init__(self, stream)
SafeConstructor.__init__(self)
Resolver.__init__(self)
|
CSafeLoader
|
python
|
doocs__leetcode
|
solution/1000-1099/1062.Longest Repeating Substring/Solution.py
|
{
"start": 0,
"end": 368
}
|
class ____:
def longestRepeatingSubstring(self, s: str) -> int:
n = len(s)
f = [[0] * n for _ in range(n)]
ans = 0
for i in range(1, n):
for j in range(i):
if s[i] == s[j]:
f[i][j] = 1 + (f[i - 1][j - 1] if j else 0)
ans = max(ans, f[i][j])
return ans
|
Solution
|
python
|
pytorch__pytorch
|
torch/distributed/checkpoint/_experimental/barriers.py
|
{
"start": 2949,
"end": 4014
}
|
class ____(abc.ABC):
"""
Abstract base class for synchronization barriers.
A barrier ensures that all ranks in a distributed environment reach a certain
point in execution before any rank proceeds further, which is essential for
coordinating operations like checkpointing across multiple processes.
"""
@abc.abstractmethod
def __init__(self, **kwargs: dict[str, Any]):
"""
Initialize a barrier.
Args:
**kwargs: Keyword arguments for specific barrier implementations.
Common arguments may include rank information, barrier prefixes,
timeout settings, and other barrier-specific configuration.
"""
# No implementation needed in the abstract base class
@abc.abstractmethod
def execute_barrier(self) -> None:
"""
Execute a synchronization barrier.
This method uses the barrier_prefix provided during initialization to
coordinate synchronization across processes.
"""
@register_barrier
|
Barrier
|
python
|
pytorch__pytorch
|
test/test_testing.py
|
{
"start": 73446,
"end": 93156
}
|
class ____(TestCase):
def test_unparametrized_names(self, device):
# This test exists to protect against regressions in device / dtype test naming
# due to parametrization logic.
device = self.device_type
class TestParametrized(TestCase):
def test_device_specific(self, device):
pass
@dtypes(torch.float32, torch.float64)
def test_device_dtype_specific(self, device, dtype):
pass
locals_dict = dict(locals())
instantiate_device_type_tests(TestParametrized, locals_dict, only_for=device)
device_cls = locals_dict[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_device_dtype_specific_{}_float32',
'{}.test_device_dtype_specific_{}_float64',
'{}.test_device_specific_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_empty_param_names(self, device):
# If no param names are passed, ensure things still work without parametrization.
device = self.device_type
class TestParametrized(TestCase):
@parametrize("", [])
def test_foo(self, device):
pass
@parametrize("", range(5))
def test_bar(self, device):
pass
locals_dict = dict(locals())
instantiate_device_type_tests(TestParametrized, locals_dict, only_for=device)
device_cls = locals_dict[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_bar_{}',
'{}.test_foo_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_empty_param_list(self, device):
# If no param values are passed, ensure a helpful error message is thrown.
# In the wild, this could indicate reuse of an exhausted generator.
device = self.device_type
generator = (a for a in range(5))
class TestParametrized(TestCase):
@parametrize("x", generator)
def test_foo(self, device, x):
pass
# Reuse generator from first test function.
@parametrize("y", generator)
def test_bar(self, device, y):
pass
with self.assertRaisesRegex(ValueError, 'An empty arg_values was passed'):
locals_dict = dict(locals())
instantiate_device_type_tests(TestParametrized, locals_dict, only_for=device)
def test_default_names(self, device):
device = self.device_type
class TestParametrized(TestCase):
@parametrize("x", range(5))
def test_default_names(self, device, x):
pass
@parametrize("x,y", [(1, 2), (2, 3), (3, 4)])
def test_two_things_default_names(self, device, x, y):
pass
locals_dict = dict(locals())
instantiate_device_type_tests(TestParametrized, locals_dict, only_for=device)
device_cls = locals_dict[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_default_names_x_0_{}',
'{}.test_default_names_x_1_{}',
'{}.test_default_names_x_2_{}',
'{}.test_default_names_x_3_{}',
'{}.test_default_names_x_4_{}',
'{}.test_two_things_default_names_x_1_y_2_{}',
'{}.test_two_things_default_names_x_2_y_3_{}',
'{}.test_two_things_default_names_x_3_y_4_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_default_name_non_primitive(self, device):
device = self.device_type
class TestParametrized(TestCase):
@parametrize("x", [1, .5, "foo", object()])
def test_default_names(self, device, x):
pass
@parametrize("x,y", [(1, object()), (object(), .5), (object(), object())])
def test_two_things_default_names(self, device, x, y):
pass
locals_dict = dict(locals())
instantiate_device_type_tests(TestParametrized, locals_dict, only_for=device)
device_cls = locals_dict[f'TestParametrized{device.upper()}']
expected_test_names = sorted(name.format(device_cls.__name__, device) for name in (
'{}.test_default_names_x_1_{}',
'{}.test_default_names_x_0_5_{}',
'{}.test_default_names_x_foo_{}',
'{}.test_default_names_x3_{}',
'{}.test_two_things_default_names_x_1_y0_{}',
'{}.test_two_things_default_names_x1_y_0_5_{}',
'{}.test_two_things_default_names_x2_y2_{}')
)
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_name_fn(self, device):
device = self.device_type
class TestParametrized(TestCase):
@parametrize("bias", [False, True], name_fn=lambda b: 'bias' if b else 'no_bias')
def test_custom_names(self, device, bias):
pass
@parametrize("x", [1, 2], name_fn=str)
@parametrize("y", [3, 4], name_fn=str)
@parametrize("z", [5, 6], name_fn=str)
def test_three_things_composition_custom_names(self, device, x, y, z):
pass
@parametrize("x,y", [(1, 2), (1, 3), (1, 4)], name_fn=lambda x, y: f'{x}__{y}')
def test_two_things_custom_names_alternate(self, device, x, y):
pass
locals_dict = dict(locals())
instantiate_device_type_tests(TestParametrized, locals_dict, only_for=device)
device_cls = locals_dict[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_custom_names_bias_{}',
'{}.test_custom_names_no_bias_{}',
'{}.test_three_things_composition_custom_names_1_3_5_{}',
'{}.test_three_things_composition_custom_names_1_3_6_{}',
'{}.test_three_things_composition_custom_names_1_4_5_{}',
'{}.test_three_things_composition_custom_names_1_4_6_{}',
'{}.test_three_things_composition_custom_names_2_3_5_{}',
'{}.test_three_things_composition_custom_names_2_3_6_{}',
'{}.test_three_things_composition_custom_names_2_4_5_{}',
'{}.test_three_things_composition_custom_names_2_4_6_{}',
'{}.test_two_things_custom_names_alternate_1__2_{}',
'{}.test_two_things_custom_names_alternate_1__3_{}',
'{}.test_two_things_custom_names_alternate_1__4_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_subtest_names(self, device):
device = self.device_type
class TestParametrized(TestCase):
@parametrize("bias", [subtest(True, name='bias'),
subtest(False, name='no_bias')])
def test_custom_names(self, device, bias):
pass
@parametrize("x,y", [subtest((1, 2), name='double'),
subtest((1, 3), name='triple'),
subtest((1, 4), name='quadruple')])
def test_two_things_custom_names(self, device, x, y):
pass
locals_dict = dict(locals())
instantiate_device_type_tests(TestParametrized, locals_dict, only_for=device)
device_cls = locals_dict[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_custom_names_bias_{}',
'{}.test_custom_names_no_bias_{}',
'{}.test_two_things_custom_names_double_{}',
'{}.test_two_things_custom_names_quadruple_{}',
'{}.test_two_things_custom_names_triple_{}')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(expected_test_names, test_names)
def test_ops_composition_names(self, device):
device = self.device_type
class TestParametrized(TestCase):
@ops(op_db)
@parametrize("flag", [False, True], lambda f: 'flag_enabled' if f else 'flag_disabled')
def test_op_parametrized(self, device, dtype, op, flag):
pass
locals_dict = dict(locals())
instantiate_device_type_tests(TestParametrized, locals_dict, only_for=device)
device_cls = locals_dict[f'TestParametrized{device.upper()}']
expected_test_names = []
for op in op_db:
for dtype in op.supported_dtypes(torch.device(device).type):
for flag_part in ('flag_disabled', 'flag_enabled'):
expected_name = f'{device_cls.__name__}.test_op_parametrized_{op.formatted_name}_{flag_part}_{device}_{dtype_name(dtype)}' # noqa: B950
expected_test_names.append(expected_name)
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(sorted(expected_test_names), sorted(test_names))
def test_modules_composition_names(self, device):
device = self.device_type
class TestParametrized(TestCase):
@modules(module_db)
@parametrize("flag", [False, True], lambda f: 'flag_enabled' if f else 'flag_disabled')
def test_module_parametrized(self, device, dtype, module_info, training, flag):
pass
locals_dict = dict(locals())
instantiate_device_type_tests(TestParametrized, locals_dict, only_for=device)
device_cls = locals_dict[f'TestParametrized{device.upper()}']
expected_test_names = []
for module_info in module_db:
for dtype in module_info.dtypes:
for flag_part in ('flag_disabled', 'flag_enabled'):
expected_train_modes = (
['train_mode', 'eval_mode'] if module_info.train_and_eval_differ else [''])
for training_part in expected_train_modes:
expected_name = '{}.test_module_parametrized_{}{}_{}_{}_{}'.format(
device_cls.__name__, module_info.formatted_name,
'_' + training_part if len(training_part) > 0 else '',
flag_part, device, dtype_name(dtype))
expected_test_names.append(expected_name)
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(sorted(expected_test_names), sorted(test_names))
def test_ops_decorator_applies_op_and_param_specific_decorators(self, device):
# Test that decorators can be applied on a per-op / per-param basis.
# Create a test op, OpInfo entry, and decorator to apply.
def test_op(x):
return -x
def test_dec(func):
func._decorator_applied = True
return func
test_op_info = OpInfo(
'test_op',
op=test_op,
dtypes=floating_types(),
sample_inputs_func=lambda _: [],
decorators=[
DecorateInfo(test_dec, 'TestParametrized', 'test_op_param',
device_type='cpu', dtypes=[torch.float64],
active_if=lambda p: p['x'] == 2)
])
class TestParametrized(TestCase):
@ops(op_db + [test_op_info])
@parametrize("x", [2, 3])
def test_op_param(self, device, dtype, op, x):
pass
@ops(op_db + [test_op_info])
@parametrize("y", [
subtest(4),
subtest(5, decorators=[test_dec])])
def test_other(self, device, dtype, op, y):
pass
@decorateIf(test_dec, lambda p: p['dtype'] == torch.int16)
@ops(op_db)
def test_three(self, device, dtype, op):
pass
device = self.device_type
locals_dict = dict(locals())
instantiate_device_type_tests(TestParametrized, locals_dict, only_for=device)
device_cls = locals_dict[f'TestParametrized{device.upper()}']
for test_func, name in _get_test_funcs_for_test_class(device_cls):
should_apply = (name == 'test_op_param_test_op_x_2_cpu_float64' or
('test_other' in name and 'y_5' in name) or
('test_three' in name and name.endswith('_int16')))
self.assertEqual(hasattr(test_func, '_decorator_applied'), should_apply)
def test_modules_decorator_applies_module_and_param_specific_decorators(self, device):
# Test that decorators can be applied on a per-module / per-param basis.
# Create a test module, ModuleInfo entry, and decorator to apply.
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.x = torch.nn.Parameter(torch.randn(3))
def forward(self, y):
return self.x + y
def test_dec(func):
func._decorator_applied = True
return func
test_module_info = ModuleInfo(
TestModule,
module_inputs_func=lambda _: [],
decorators=[
DecorateInfo(test_dec, 'TestParametrized', 'test_module_param',
device_type='cpu', dtypes=[torch.float64],
active_if=lambda p: p['x'] == 2)
])
class TestParametrized(TestCase):
@modules(module_db + [test_module_info])
@parametrize("x", [2, 3])
def test_module_param(self, device, dtype, module_info, training, x):
pass
@modules(module_db + [test_module_info])
@parametrize("y", [
subtest(4),
subtest(5, decorators=[test_dec])])
def test_other(self, device, dtype, module_info, training, y):
pass
@decorateIf(test_dec, lambda p: p['dtype'] == torch.float64)
@modules(module_db)
def test_three(self, device, dtype, module_info):
pass
device = self.device_type
locals_dict = dict(locals())
instantiate_device_type_tests(TestParametrized, locals_dict, only_for=device)
device_cls = locals_dict[f'TestParametrized{device.upper()}']
for test_func, name in _get_test_funcs_for_test_class(device_cls):
should_apply = (name == 'test_module_param_TestModule_x_2_cpu_float64' or
('test_other' in name and 'y_5' in name) or
('test_three' in name and name.endswith('float64')))
self.assertEqual(hasattr(test_func, '_decorator_applied'), should_apply)
def test_param_specific_decoration(self, device):
def test_dec(func):
func._decorator_applied = True
return func
class TestParametrized(TestCase):
@decorateIf(test_dec, lambda params: params["x"] == 1 and params["y"])
@parametrize("x", range(5))
@parametrize("y", [False, True])
def test_param(self, x, y):
pass
device = self.device_type
locals_dict = dict(locals())
instantiate_device_type_tests(TestParametrized, locals_dict, only_for=device)
device_cls = locals_dict[f'TestParametrized{device.upper()}']
for test_func, name in _get_test_funcs_for_test_class(device_cls):
should_apply = ('test_param_x_1_y_True' in name)
self.assertEqual(hasattr(test_func, '_decorator_applied'), should_apply)
def test_dtypes_composition_valid(self, device):
# Test checks that @parametrize and @dtypes compose as expected when @parametrize
# doesn't set dtype.
device = self.device_type
class TestParametrized(TestCase):
@dtypes(torch.float32, torch.float64)
@parametrize("x", range(3))
def test_parametrized(self, x, dtype):
pass
locals_dict = dict(locals())
instantiate_device_type_tests(TestParametrized, locals_dict, only_for=device)
device_cls = locals_dict[f'TestParametrized{device.upper()}']
expected_test_names = [name.format(device_cls.__name__, device) for name in (
'{}.test_parametrized_x_0_{}_float32',
'{}.test_parametrized_x_0_{}_float64',
'{}.test_parametrized_x_1_{}_float32',
'{}.test_parametrized_x_1_{}_float64',
'{}.test_parametrized_x_2_{}_float32',
'{}.test_parametrized_x_2_{}_float64')
]
test_names = _get_test_names_for_test_class(device_cls)
self.assertEqual(sorted(expected_test_names), sorted(test_names))
def test_dtypes_composition_invalid(self, device):
# Test checks that @dtypes cannot be composed with parametrization decorators when they
# also try to set dtype.
device = self.device_type
class TestParametrized(TestCase):
@dtypes(torch.float32, torch.float64)
@parametrize("dtype", [torch.int32, torch.int64])
def test_parametrized(self, dtype):
pass
with self.assertRaisesRegex(RuntimeError, "handled multiple times"):
locals_dict = dict(locals())
instantiate_device_type_tests(TestParametrized, locals_dict, only_for=device)
# Verify proper error behavior with @ops + @dtypes, as both try to set dtype.
class TestParametrized(TestCase):
@dtypes(torch.float32, torch.float64)
@ops(op_db)
def test_parametrized(self, op, dtype):
pass
with self.assertRaisesRegex(RuntimeError, "handled multiple times"):
locals_dict = dict(locals())
instantiate_device_type_tests(TestParametrized, locals_dict, only_for=device)
def test_multiple_handling_of_same_param_error(self, device):
# Test that multiple decorators handling the same param errors out.
# Both @modules and @ops handle the dtype param.
class TestParametrized(TestCase):
@ops(op_db)
@modules(module_db)
def test_param(self, device, dtype, op, module_info, training):
pass
with self.assertRaisesRegex(RuntimeError, "handled multiple times"):
locals_dict = dict(locals())
instantiate_device_type_tests(TestParametrized, locals_dict, only_for=device)
@parametrize("x", [1, subtest(2, decorators=[unittest.expectedFailure]), 3])
def test_subtest_expected_failure(self, device, x):
if x == 2:
raise RuntimeError('Boom')
@parametrize("x", [subtest(1, decorators=[unittest.expectedFailure]), 2, 3])
@parametrize("y", [4, 5, subtest(6, decorators=[unittest.expectedFailure])])
def test_two_things_subtest_expected_failure(self, device, x, y):
if x == 1 or y == 6:
raise RuntimeError('Boom')
instantiate_parametrized_tests(TestTestParametrization)
instantiate_device_type_tests(TestTestParametrizationDeviceType, globals())
|
TestTestParametrizationDeviceType
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_bigquery.py
|
{
"start": 82110,
"end": 89539
}
|
class ____:
@pytest.mark.db_test
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryCheckOperator._validate_records")
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryCheckOperator.defer")
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
def test_bigquery_check_operator_async_finish_before_deferred(
self, mock_hook, mock_defer, mock_validate_records, create_task_instance_of_operator
):
job_id = "123456"
hash_ = "hash"
real_job_id = f"{job_id}_{hash_}"
mocked_job = MagicMock(job_id=real_job_id, error_result=False)
mocked_job.result.return_value = iter([(1, 2, 3)]) # mock rows generator
mock_hook.return_value.insert_job.return_value = mocked_job
mock_hook.return_value.insert_job.return_value.running.return_value = False
ti = create_task_instance_of_operator(
BigQueryCheckOperator,
dag_id="dag_id",
task_id="bq_check_operator_job",
sql="SELECT * FROM any",
location=TEST_DATASET_LOCATION,
deferrable=True,
)
ti.task.execute(MagicMock())
mock_defer.assert_not_called()
mock_validate_records.assert_called_once_with((1, 2, 3))
@pytest.mark.db_test
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryCheckOperator._validate_records")
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
def test_bigquery_check_operator_query_parameters_passing(
self, mock_hook, mock_validate_records, create_task_instance_of_operator
):
job_id = "123456"
hash_ = "hash"
real_job_id = f"{job_id}_{hash_}"
query_params = [ScalarQueryParameter("test_param", "INT64", 1)]
mocked_job = MagicMock(job_id=real_job_id, error_result=False)
mocked_job.result.return_value = iter([(1, 2, 3)]) # mock rows generator
mock_hook.return_value.insert_job.return_value = mocked_job
mock_hook.return_value.insert_job.return_value.running.return_value = False
ti = create_task_instance_of_operator(
BigQueryCheckOperator,
dag_id="dag_id",
task_id="bq_check_operator_query_params_job",
sql="SELECT * FROM any WHERE test_param = @test_param",
location=TEST_DATASET_LOCATION,
deferrable=True,
query_params=query_params,
)
ti.task.execute(MagicMock())
mock_validate_records.assert_called_once_with((1, 2, 3))
@pytest.mark.db_test
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
def test_bigquery_check_operator_async_finish_with_error_before_deferred(
self, mock_hook, create_task_instance_of_operator
):
job_id = "123456"
hash_ = "hash"
real_job_id = f"{job_id}_{hash_}"
mock_hook.return_value.insert_job.return_value = MagicMock(job_id=real_job_id, error_result=True)
mock_hook.return_value.insert_job.return_value.running.return_value = False
ti = create_task_instance_of_operator(
BigQueryCheckOperator,
dag_id="dag_id",
task_id="bq_check_operator_job",
sql="SELECT * FROM any",
location=TEST_DATASET_LOCATION,
deferrable=True,
)
with pytest.raises(AirflowException) as exc:
ti.task.execute(MagicMock())
assert str(exc.value) == f"BigQuery job {real_job_id} failed: True"
@pytest.mark.db_test
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
def test_bigquery_check_operator_async(self, mock_hook, create_task_instance_of_operator):
"""
Asserts that a task is deferred and a BigQueryCheckTrigger will be fired
when the BigQueryCheckOperator is executed with deferrable=True.
"""
job_id = "123456"
hash_ = "hash"
real_job_id = f"{job_id}_{hash_}"
mock_hook.return_value.insert_job.return_value = MagicMock(job_id=real_job_id, error_result=False)
ti = create_task_instance_of_operator(
BigQueryCheckOperator,
dag_id="dag_id",
task_id="bq_check_operator_job",
sql="SELECT * FROM any",
location=TEST_DATASET_LOCATION,
deferrable=True,
)
with pytest.raises(TaskDeferred) as exc:
ti.task.execute(MagicMock())
assert isinstance(exc.value.trigger, BigQueryCheckTrigger), "Trigger is not a BigQueryCheckTrigger"
def test_bigquery_check_operator_execute_failure(self):
"""Tests that an AirflowException is raised in case of error event"""
operator = BigQueryCheckOperator(
task_id="bq_check_operator_execute_failure",
sql="SELECT * FROM any",
location=TEST_DATASET_LOCATION,
deferrable=True,
)
with pytest.raises(AirflowException):
operator.execute_complete(
context=None, event={"status": "error", "message": "test failure message"}
)
def test_bigquery_check_operator_project_id(self):
operator = BigQueryCheckOperator(
task_id="bq_check_operator_project_id",
sql="SELECT * FROM any",
location=TEST_DATASET_LOCATION,
project_id=TEST_JOB_PROJECT_ID,
)
assert operator.project_id == TEST_JOB_PROJECT_ID
def test_bigquery_check_op_execute_complete_with_no_records(self):
"""Asserts that exception is raised with correct expected exception message"""
operator = BigQueryCheckOperator(
task_id="bq_check_operator_execute_complete",
sql="SELECT * FROM any",
location=TEST_DATASET_LOCATION,
deferrable=True,
)
with pytest.raises(AirflowException, match="The following query returned zero rows:"):
operator.execute_complete(context=None, event={"status": "success", "records": None})
def test_bigquery_check_op_execute_complete_with_non_boolean_records(self):
"""Executing a sql which returns a non-boolean value should raise exception"""
test_sql = "SELECT * FROM any"
operator = BigQueryCheckOperator(
task_id="bq_check_operator_execute_complete",
sql=test_sql,
location=TEST_DATASET_LOCATION,
deferrable=True,
)
expected_exception_msg = f"Test failed.\nQuery:\n{test_sql}\nResults:\n{[20, False]!s}"
with pytest.raises(AirflowException) as exc:
operator.execute_complete(context=None, event={"status": "success", "records": [20, False]})
assert str(exc.value) == expected_exception_msg
def test_bigquery_check_operator_execute_complete(self):
"""Asserts that logging occurs as expected"""
operator = BigQueryCheckOperator(
task_id="bq_check_operator_execute_complete",
sql="SELECT * FROM any",
location=TEST_DATASET_LOCATION,
deferrable=True,
)
with mock.patch.object(operator.log, "info") as mock_log_info:
operator.execute_complete(context=None, event={"status": "success", "records": [20]})
mock_log_info.assert_called_with("Success.")
|
TestBigQueryCheckOperator
|
python
|
readthedocs__readthedocs.org
|
readthedocs/aws/security_token_service.py
|
{
"start": 1823,
"end": 8809
}
|
class ____(AWSTemporaryCredentials):
"""Subclass of AWSTemporaryCredentials to include S3 specific fields."""
bucket_name: str
region_name: str
def get_sts_client():
return boto3.client(
"sts",
aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
region_name=settings.AWS_S3_REGION_NAME,
)
def _get_scoped_credentials(*, session_name, policy, duration) -> AWSTemporaryCredentials:
"""
:param session_name: An identifier to attach to the generated credentials, useful to identify who requested them.
AWS limits the session name to 64 characters, so if the session_name is too long, it will be truncated.
:param duration: The duration of the credentials in seconds. Default is 15 minutes.
Note that the minimum duration time is 15 minutes and the maximum is given by the role (defaults to 1 hour).
:param policy: The inline policy to attach to the generated credentials.
.. note::
If USING_AWS is set to False, this function will return
the values of the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY settings.
Useful for local development where we don't have a service like AWS STS.
"""
if not settings.USING_AWS:
if not settings.DEBUG:
raise ValueError(
"Not returning global credentials, AWS STS should always be used in production."
)
return AWSTemporaryCredentials(
access_key_id=settings.AWS_ACCESS_KEY_ID,
secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
# A session token is not needed for the default credentials.
session_token=None,
)
# Limit to 64 characters, as per AWS limitations.
session_name = session_name[:64]
try:
sts_client = get_sts_client()
response = sts_client.assume_role(
RoleArn=settings.AWS_STS_ASSUME_ROLE_ARN,
RoleSessionName=session_name,
Policy=json.dumps(policy),
DurationSeconds=duration,
)
except Exception:
log.exception(
"Error while assuming role to generate temporary credentials",
session_name=session_name,
policy=policy,
duration=duration,
)
raise AWSTemporaryCredentialsError
credentials = response["Credentials"]
return AWSTemporaryCredentials(
access_key_id=credentials["AccessKeyId"],
secret_access_key=credentials["SecretAccessKey"],
session_token=credentials["SessionToken"],
)
def get_s3_build_media_scoped_credentials(
*,
build,
duration=60 * 15,
) -> AWSS3TemporaryCredentials:
"""
Get temporary credentials with read/write access to the build media bucket.
The credentials are scoped to the paths that the build needs to access.
:duration: The duration of the credentials in seconds. Default is 15 minutes.
Note that the minimum duration time is 15 minutes and the maximum is given by the role (defaults to 1 hour).
"""
project = build.project
version = build.version
bucket_arn = f"arn:aws:s3:::{settings.S3_MEDIA_STORAGE_BUCKET}"
storage_paths = version.get_storage_paths()
# Generate the list of allowed prefix resources
# The resulting prefix looks like:
# - html/project/latest/*
# - pdf/project/latest/*
allowed_prefixes = [f"{storage_path}/*" for storage_path in storage_paths]
# Generate the list of allowed object resources in ARN format.
# The resulting ARN looks like:
# arn:aws:s3:::readthedocs-media/html/project/latest/*
# arn:aws:s3:::readthedocs-media/pdf/project/latest/*
allowed_objects_arn = [f"{bucket_arn}/{prefix}" for prefix in allowed_prefixes]
# Inline policy document to limit the permissions of the temporary credentials.
policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject",
],
"Resource": allowed_objects_arn,
},
# In order to list the objects in a path, we need to allow the ListBucket action.
# But since that action is not scoped to a path, we need to limit it using a condition.
{
"Effect": "Allow",
"Action": ["s3:ListBucket"],
"Resource": [
bucket_arn,
],
"Condition": {
"StringLike": {
"s3:prefix": allowed_prefixes,
}
},
},
],
}
session_name = f"rtd-{build.id}-{project.slug}-{version.slug}"
credentials = _get_scoped_credentials(
session_name=session_name,
policy=policy,
duration=duration,
)
return AWSS3TemporaryCredentials(
access_key_id=credentials.access_key_id,
secret_access_key=credentials.secret_access_key,
session_token=credentials.session_token,
region_name=settings.AWS_S3_REGION_NAME,
bucket_name=settings.S3_MEDIA_STORAGE_BUCKET,
)
def get_s3_build_tools_scoped_credentials(
*,
build,
duration=60 * 15,
) -> AWSS3TemporaryCredentials:
"""
Get temporary credentials with read-only access to the build-tools bucket.
:param build: The build to get the credentials for.
:param duration: The duration of the credentials in seconds. Default is 15 minutes.
Note that the minimum duration time is 15 minutes and the maximum is given by the role (defaults to 1 hour).
"""
project = build.project
version = build.version
bucket = settings.S3_BUILD_TOOLS_STORAGE_BUCKET
bucket_arn = f"arn:aws:s3:::{bucket}"
# Inline policy to limit the permissions of the temporary credentials.
# The build-tools bucket is publicly readable, so we don't need to limit the permissions to a specific path.
policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"s3:ListBucket",
],
"Resource": [
bucket_arn,
f"{bucket_arn}/*",
],
},
],
}
session_name = f"rtd-{build.id}-{project.slug}-{version.slug}"
credentials = _get_scoped_credentials(
session_name=session_name,
policy=policy,
duration=duration,
)
return AWSS3TemporaryCredentials(
access_key_id=credentials.access_key_id,
secret_access_key=credentials.secret_access_key,
session_token=credentials.session_token,
region_name=settings.AWS_S3_REGION_NAME,
bucket_name=bucket,
)
|
AWSS3TemporaryCredentials
|
python
|
getsentry__sentry
|
fixtures/safe_migrations_apps/bad_flow_add_column_with_default_app/migrations/0001_initial.py
|
{
"start": 153,
"end": 586
}
|
class ____(CheckedMigration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="TestTable",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
],
),
]
|
Migration
|
python
|
numba__numba
|
numba/tests/test_typedlist.py
|
{
"start": 25759,
"end": 28728
}
|
class ____(MemoryLeakMixin, TestCase):
def _cmp_dance(self, expected, pa, pb, na, nb):
# interpreter with regular list
self.assertEqual(cmp.py_func(pa, pb), expected)
# interpreter with typed-list
py_got = cmp.py_func(na, nb)
self.assertEqual(py_got, expected)
# compiled with typed-list
jit_got = cmp(na, nb)
self.assertEqual(jit_got, expected)
def test_empty_vs_empty(self):
pa, pb = [], []
na, nb = to_tl(pa), to_tl(pb)
expected = False, True, True, False, True, False
self._cmp_dance(expected, pa, pb, na, nb)
def test_empty_vs_singleton(self):
pa, pb = [], [0]
na, nb = to_tl(pa), to_tl(pb)
expected = True, True, False, True, False, False
self._cmp_dance(expected, pa, pb, na, nb)
def test_singleton_vs_empty(self):
pa, pb = [0], []
na, nb = to_tl(pa), to_tl(pb)
expected = False, False, False, True, True, True
self._cmp_dance(expected, pa, pb, na, nb)
def test_singleton_vs_singleton_equal(self):
pa, pb = [0], [0]
na, nb = to_tl(pa), to_tl(pb)
expected = False, True, True, False, True, False
self._cmp_dance(expected, pa, pb, na, nb)
def test_singleton_vs_singleton_less_than(self):
pa, pb = [0], [1]
na, nb = to_tl(pa), to_tl(pb)
expected = True, True, False, True, False, False
self._cmp_dance(expected, pa, pb, na, nb)
def test_singleton_vs_singleton_greater_than(self):
pa, pb = [1], [0]
na, nb = to_tl(pa), to_tl(pb)
expected = False, False, False, True, True, True
self._cmp_dance(expected, pa, pb, na, nb)
def test_equal(self):
pa, pb = [1, 2, 3], [1, 2, 3]
na, nb = to_tl(pa), to_tl(pb)
expected = False, True, True, False, True, False
self._cmp_dance(expected, pa, pb, na, nb)
def test_first_shorter(self):
pa, pb = [1, 2], [1, 2, 3]
na, nb = to_tl(pa), to_tl(pb)
expected = True, True, False, True, False, False
self._cmp_dance(expected, pa, pb, na, nb)
def test_second_shorter(self):
pa, pb = [1, 2, 3], [1, 2]
na, nb = to_tl(pa), to_tl(pb)
expected = False, False, False, True, True, True
self._cmp_dance(expected, pa, pb, na, nb)
def test_first_less_than(self):
pa, pb = [1, 2, 2], [1, 2, 3]
na, nb = to_tl(pa), to_tl(pb)
expected = True, True, False, True, False, False
self._cmp_dance(expected, pa, pb, na, nb)
def test_first_greater_than(self):
pa, pb = [1, 2, 3], [1, 2, 2]
na, nb = to_tl(pa), to_tl(pb)
expected = False, False, False, True, True, True
self._cmp_dance(expected, pa, pb, na, nb)
def test_equals_non_list(self):
l = to_tl([1, 2, 3])
self.assertFalse(any(cmp.py_func(l, 1)))
self.assertFalse(any(cmp(l, 1)))
|
TestComparisons
|
python
|
pytorch__pytorch
|
torch/_inductor/utils.py
|
{
"start": 47805,
"end": 47890
}
|
class ____:
value: str
line_map: list[tuple[int, LineContext]]
|
ValueWithLineMap
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1180517,
"end": 1184572
}
|
class ____(sgqlc.types.Type, Node, Starrable, UniformResourceLocatable):
"""A Gist."""
__schema__ = github_schema
__field_names__ = (
"comments",
"created_at",
"description",
"files",
"forks",
"is_fork",
"is_public",
"name",
"owner",
"pushed_at",
"updated_at",
)
comments = sgqlc.types.Field(
sgqlc.types.non_null(GistCommentConnection),
graphql_name="comments",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of comments associated with the gist
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
description = sgqlc.types.Field(String, graphql_name="description")
"""The gist description."""
files = sgqlc.types.Field(
sgqlc.types.list_of(GistFile),
graphql_name="files",
args=sgqlc.types.ArgDict(
(
("limit", sgqlc.types.Arg(Int, graphql_name="limit", default=10)),
("oid", sgqlc.types.Arg(GitObjectID, graphql_name="oid", default=None)),
)
),
)
"""The files in this gist.
Arguments:
* `limit` (`Int`): The maximum number of files to return.
(default: `10`)
* `oid` (`GitObjectID`): The oid of the files to return
"""
forks = sgqlc.types.Field(
sgqlc.types.non_null(GistConnection),
graphql_name="forks",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("order_by", sgqlc.types.Arg(GistOrder, graphql_name="orderBy", default=None)),
)
),
)
"""A list of forks associated with the gist
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `order_by` (`GistOrder`): Ordering options for gists returned
from the connection
"""
is_fork = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isFork")
"""Identifies if the gist is a fork."""
is_public = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isPublic")
"""Whether the gist is public or not."""
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""The gist name."""
owner = sgqlc.types.Field(RepositoryOwner, graphql_name="owner")
"""The gist owner."""
pushed_at = sgqlc.types.Field(DateTime, graphql_name="pushedAt")
"""Identifies when the gist was last pushed to."""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""Identifies the date and time when the object was last updated."""
|
Gist
|
python
|
numba__numba
|
numba/tests/test_tuples.py
|
{
"start": 17393,
"end": 17777
}
|
class ____(TestCase, MemoryLeakMixin):
def test_tuple_add(self):
def pyfunc(x):
a = np.arange(3)
return (a,) + (x,)
cfunc = jit(nopython=True)(pyfunc)
x = 123
expect_a, expect_x = pyfunc(x)
got_a, got_x = cfunc(x)
np.testing.assert_equal(got_a, expect_a)
self.assertEqual(got_x, expect_x)
|
TestTupleNRT
|
python
|
google__pytype
|
pytype/rewrite/abstract/functions.py
|
{
"start": 17832,
"end": 19649
}
|
class ____(BaseFunction[_HasReturnT]):
"""Signature-based function implementation."""
def __init__(
self,
ctx: base.ContextType,
name: str,
signatures: tuple[Signature, ...],
module: str | None = None,
):
super().__init__(ctx)
self._name = name
self._signatures = signatures
self.module = module
def __repr__(self):
return f'SimpleFunction({self.full_name})'
@property
def name(self):
return self._name
@property
def full_name(self):
if self.module:
return f'{self.module}.{self._name}'
else:
return self._name
@property
def signatures(self):
return self._signatures
@property
def _attrs(self):
return (self._name, self._signatures)
def map_args(self, args: Args[_FrameT]) -> MappedArgs[_FrameT]:
# TODO(b/241479600): Handle arg mapping failure.
for sig in self.signatures:
return sig.map_args(args)
raise NotImplementedError('No signature matched passed args')
@abc.abstractmethod
def call_with_mapped_args(
self, mapped_args: MappedArgs[FrameType]) -> _HasReturnT:
"""Calls this function with the given mapped arguments.
Args:
mapped_args: The function arguments mapped to parameter names.
Returns:
An object with information about the result of the function call, with a
get_return_value() method that retrieves the return value.
"""
def call(self, args: Args[FrameType]) -> _HasReturnT:
return self.call_with_mapped_args(self.map_args(args))
def analyze_signature(self, sig: Signature) -> _HasReturnT:
assert sig in self.signatures
return self.call_with_mapped_args(sig.make_fake_args())
def analyze(self) -> Sequence[_HasReturnT]:
return [self.analyze_signature(sig) for sig in self.signatures]
|
SimpleFunction
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/index_object.py
|
{
"start": 165,
"end": 1408
}
|
class ____:
params = (
["monotonic", "non_monotonic"],
["datetime", "date_string", "int", "strings", "ea_int"],
["intersection", "union", "symmetric_difference"],
)
param_names = ["index_structure", "dtype", "method"]
def setup(self, index_structure, dtype, method):
N = 10**5
dates_left = date_range("1/1/2000", periods=N, freq="min")
fmt = "%Y-%m-%d %H:%M:%S"
date_str_left = Index(dates_left.strftime(fmt))
int_left = Index(np.arange(N))
ea_int_left = Index(np.arange(N), dtype="Int64")
str_left = Index([f"i-{i}" for i in range(N)], dtype=object)
data = {
"datetime": dates_left,
"date_string": date_str_left,
"int": int_left,
"strings": str_left,
"ea_int": ea_int_left,
}
if index_structure == "non_monotonic":
data = {k: mi[::-1] for k, mi in data.items()}
data = {k: {"left": idx, "right": idx[:-1]} for k, idx in data.items()}
self.left = data[dtype]["left"]
self.right = data[dtype]["right"]
def time_operation(self, index_structure, dtype, method):
getattr(self.left, method)(self.right)
|
SetOperations
|
python
|
pytransitions__transitions
|
transitions/extensions/locking.py
|
{
"start": 1178,
"end": 1668
}
|
class ____:
"""A wrapper for threading.Lock which discards its state during pickling and
is reinitialized unlocked when unpickled.
"""
def __init__(self):
self.lock = Lock()
def __getstate__(self):
return ''
def __setstate__(self, value):
return self.__init__()
def __enter__(self):
self.lock.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
self.lock.__exit__(exc_type, exc_val, exc_tb)
|
PicklableLock
|
python
|
wandb__wandb
|
wandb/vendor/graphql-core-1.1/wandb_graphql/language/ast.py
|
{
"start": 33116,
"end": 33925
}
|
class ____(TypeSystemDefinition):
__slots__ = ('loc', 'definition',)
_fields = ('definition',)
def __init__(self, definition, loc=None):
self.loc = loc
self.definition = definition
def __eq__(self, other):
return (
self is other or (
isinstance(other, TypeExtensionDefinition) and
# self.loc == other.loc and
self.definition == other.definition
)
)
def __repr__(self):
return ('TypeExtensionDefinition('
'definition={self.definition!r}'
')').format(self=self)
def __copy__(self):
return type(self)(
self.definition,
self.loc
)
def __hash__(self):
return id(self)
|
TypeExtensionDefinition
|
python
|
mlflow__mlflow
|
mlflow/environment_variables.py
|
{
"start": 1752,
"end": 51284
}
|
class ____(_EnvironmentVariable):
"""
Represents a boolean environment variable.
"""
def __init__(self, name, default):
# `default not in [True, False, None]` doesn't work because `1 in [True]`
# (or `0 in [False]`) returns True.
if not (default is True or default is False or default is None):
raise ValueError(f"{name} default value must be one of [True, False, None]")
super().__init__(name, bool, default)
def get(self):
# TODO: Remove this block in MLflow 3.2.0
if self.name == MLFLOW_CONFIGURE_LOGGING.name and (
val := os.getenv("MLFLOW_LOGGING_CONFIGURE_LOGGING")
):
warnings.warn(
"Environment variable MLFLOW_LOGGING_CONFIGURE_LOGGING is deprecated and will be "
f"removed in a future release. Please use {MLFLOW_CONFIGURE_LOGGING.name} instead.",
FutureWarning,
stacklevel=2,
)
return val.lower() in ["true", "1"]
if not self.defined:
return self.default
val = os.getenv(self.name)
lowercased = val.lower()
if lowercased not in ["true", "false", "1", "0"]:
raise ValueError(
f"{self.name} value must be one of ['true', 'false', '1', '0'] (case-insensitive), "
f"but got {val}"
)
return lowercased in ["true", "1"]
#: Specifies the tracking URI.
#: (default: ``None``)
MLFLOW_TRACKING_URI = _EnvironmentVariable("MLFLOW_TRACKING_URI", str, None)
#: Specifies the registry URI.
#: (default: ``None``)
MLFLOW_REGISTRY_URI = _EnvironmentVariable("MLFLOW_REGISTRY_URI", str, None)
#: Specifies the ``dfs_tmpdir`` parameter to use for ``mlflow.spark.save_model``,
#: ``mlflow.spark.log_model`` and ``mlflow.spark.load_model``. See
#: https://www.mlflow.org/docs/latest/python_api/mlflow.spark.html#mlflow.spark.save_model
#: for more information.
#: (default: ``/tmp/mlflow``)
MLFLOW_DFS_TMP = _EnvironmentVariable("MLFLOW_DFS_TMP", str, "/tmp/mlflow")
#: Specifies the maximum number of retries with exponential backoff for MLflow HTTP requests
#: (default: ``7``)
MLFLOW_HTTP_REQUEST_MAX_RETRIES = _EnvironmentVariable(
"MLFLOW_HTTP_REQUEST_MAX_RETRIES",
int,
# Important: It's common for MLflow backends to rate limit requests for more than 1 minute.
# To remain resilient to rate limiting, the MLflow client needs to retry for more than 1
# minute. Assuming 2 seconds per retry, 7 retries with backoff will take ~ 4 minutes,
# which is appropriate for most rate limiting scenarios
7,
)
#: Specifies the backoff increase factor between MLflow HTTP request failures
#: (default: ``2``)
MLFLOW_HTTP_REQUEST_BACKOFF_FACTOR = _EnvironmentVariable(
"MLFLOW_HTTP_REQUEST_BACKOFF_FACTOR", int, 2
)
#: Specifies the backoff jitter between MLflow HTTP request failures
#: (default: ``1.0``)
MLFLOW_HTTP_REQUEST_BACKOFF_JITTER = _EnvironmentVariable(
"MLFLOW_HTTP_REQUEST_BACKOFF_JITTER", float, 1.0
)
#: Specifies the timeout in seconds for MLflow HTTP requests
#: (default: ``120``)
MLFLOW_HTTP_REQUEST_TIMEOUT = _EnvironmentVariable("MLFLOW_HTTP_REQUEST_TIMEOUT", int, 120)
#: Specifies the timeout in seconds for MLflow deployment client HTTP requests
#: (non-predict operations). This is separate from MLFLOW_HTTP_REQUEST_TIMEOUT to allow
#: longer timeouts for LLM calls (default: ``300``)
MLFLOW_DEPLOYMENT_CLIENT_HTTP_REQUEST_TIMEOUT = _EnvironmentVariable(
"MLFLOW_DEPLOYMENT_CLIENT_HTTP_REQUEST_TIMEOUT", int, 300
)
#: Specifies whether to respect Retry-After header on status codes defined as
#: Retry.RETRY_AFTER_STATUS_CODES or not for MLflow HTTP request
#: (default: ``True``)
MLFLOW_HTTP_RESPECT_RETRY_AFTER_HEADER = _BooleanEnvironmentVariable(
"MLFLOW_HTTP_RESPECT_RETRY_AFTER_HEADER", True
)
#: Internal-only configuration that sets an upper bound to the allowable maximum
#: retries for HTTP requests
#: (default: ``10``)
_MLFLOW_HTTP_REQUEST_MAX_RETRIES_LIMIT = _EnvironmentVariable(
"_MLFLOW_HTTP_REQUEST_MAX_RETRIES_LIMIT", int, 10
)
#: Internal-only configuration that sets the upper bound for an HTTP backoff_factor
#: (default: ``120``)
_MLFLOW_HTTP_REQUEST_MAX_BACKOFF_FACTOR_LIMIT = _EnvironmentVariable(
"_MLFLOW_HTTP_REQUEST_MAX_BACKOFF_FACTOR_LIMIT", int, 120
)
#: Specifies whether MLflow HTTP requests should be signed using AWS signature V4. It will overwrite
#: (default: ``False``). When set, it will overwrite the "Authorization" HTTP header.
#: See https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html for more information.
MLFLOW_TRACKING_AWS_SIGV4 = _BooleanEnvironmentVariable("MLFLOW_TRACKING_AWS_SIGV4", False)
#: Specifies the auth provider to sign the MLflow HTTP request
#: (default: ``None``). When set, it will overwrite the "Authorization" HTTP header.
MLFLOW_TRACKING_AUTH = _EnvironmentVariable("MLFLOW_TRACKING_AUTH", str, None)
#: Specifies the chunk size to use when downloading a file from GCS
#: (default: ``None``). If None, the chunk size is automatically determined by the
#: ``google-cloud-storage`` package.
MLFLOW_GCS_DOWNLOAD_CHUNK_SIZE = _EnvironmentVariable("MLFLOW_GCS_DOWNLOAD_CHUNK_SIZE", int, None)
#: Specifies the chunk size to use when uploading a file to GCS.
#: (default: ``None``). If None, the chunk size is automatically determined by the
#: ``google-cloud-storage`` package.
MLFLOW_GCS_UPLOAD_CHUNK_SIZE = _EnvironmentVariable("MLFLOW_GCS_UPLOAD_CHUNK_SIZE", int, None)
#: Specifies whether to disable model logging and loading via mlflowdbfs.
#: (default: ``None``)
_DISABLE_MLFLOWDBFS = _EnvironmentVariable("DISABLE_MLFLOWDBFS", str, None)
#: Specifies the S3 endpoint URL to use for S3 artifact operations.
#: (default: ``None``)
MLFLOW_S3_ENDPOINT_URL = _EnvironmentVariable("MLFLOW_S3_ENDPOINT_URL", str, None)
#: Specifies whether or not to skip TLS certificate verification for S3 artifact operations.
#: (default: ``False``)
MLFLOW_S3_IGNORE_TLS = _BooleanEnvironmentVariable("MLFLOW_S3_IGNORE_TLS", False)
#: Specifies extra arguments for S3 artifact uploads.
#: (default: ``None``)
MLFLOW_S3_UPLOAD_EXTRA_ARGS = _EnvironmentVariable("MLFLOW_S3_UPLOAD_EXTRA_ARGS", str, None)
#: Specifies the expected AWS account ID that owns the S3 bucket for bucket ownership verification.
#: When set, all S3 API calls will include the ExpectedBucketOwner parameter to prevent
#: bucket takeover attacks. This helps protect against scenarios where a bucket is deleted
#: and recreated by a different AWS account with the same name.
#: (default: ``None``)
MLFLOW_S3_EXPECTED_BUCKET_OWNER = _EnvironmentVariable("MLFLOW_S3_EXPECTED_BUCKET_OWNER", str, None)
#: Specifies the location of a Kerberos ticket cache to use for HDFS artifact operations.
#: (default: ``None``)
MLFLOW_KERBEROS_TICKET_CACHE = _EnvironmentVariable("MLFLOW_KERBEROS_TICKET_CACHE", str, None)
#: Specifies a Kerberos user for HDFS artifact operations.
#: (default: ``None``)
MLFLOW_KERBEROS_USER = _EnvironmentVariable("MLFLOW_KERBEROS_USER", str, None)
#: Specifies extra pyarrow configurations for HDFS artifact operations.
#: (default: ``None``)
MLFLOW_PYARROW_EXTRA_CONF = _EnvironmentVariable("MLFLOW_PYARROW_EXTRA_CONF", str, None)
#: Specifies the ``pool_size`` parameter to use for ``sqlalchemy.create_engine`` in the SQLAlchemy
#: tracking store. See https://docs.sqlalchemy.org/en/14/core/engines.html#sqlalchemy.create_engine.params.pool_size
#: for more information.
#: (default: ``None``)
MLFLOW_SQLALCHEMYSTORE_POOL_SIZE = _EnvironmentVariable(
"MLFLOW_SQLALCHEMYSTORE_POOL_SIZE", int, None
)
#: Specifies the ``pool_recycle`` parameter to use for ``sqlalchemy.create_engine`` in the
#: SQLAlchemy tracking store. See https://docs.sqlalchemy.org/en/14/core/engines.html#sqlalchemy.create_engine.params.pool_recycle
#: for more information.
#: (default: ``None``)
MLFLOW_SQLALCHEMYSTORE_POOL_RECYCLE = _EnvironmentVariable(
"MLFLOW_SQLALCHEMYSTORE_POOL_RECYCLE", int, None
)
#: Specifies the ``max_overflow`` parameter to use for ``sqlalchemy.create_engine`` in the
#: SQLAlchemy tracking store. See https://docs.sqlalchemy.org/en/14/core/engines.html#sqlalchemy.create_engine.params.max_overflow
#: for more information.
#: (default: ``None``)
MLFLOW_SQLALCHEMYSTORE_MAX_OVERFLOW = _EnvironmentVariable(
"MLFLOW_SQLALCHEMYSTORE_MAX_OVERFLOW", int, None
)
#: Specifies the ``echo`` parameter to use for ``sqlalchemy.create_engine`` in the
#: SQLAlchemy tracking store. See https://docs.sqlalchemy.org/en/14/core/engines.html#sqlalchemy.create_engine.params.echo
#: for more information.
#: (default: ``False``)
MLFLOW_SQLALCHEMYSTORE_ECHO = _BooleanEnvironmentVariable("MLFLOW_SQLALCHEMYSTORE_ECHO", False)
#: Specifies whether or not to print a warning when `--env-manager=conda` is specified.
#: (default: ``False``)
MLFLOW_DISABLE_ENV_MANAGER_CONDA_WARNING = _BooleanEnvironmentVariable(
"MLFLOW_DISABLE_ENV_MANAGER_CONDA_WARNING", False
)
#: Specifies the ``poolclass`` parameter to use for ``sqlalchemy.create_engine`` in the
#: SQLAlchemy tracking store. See https://docs.sqlalchemy.org/en/14/core/engines.html#sqlalchemy.create_engine.params.poolclass
#: for more information.
#: (default: ``None``)
MLFLOW_SQLALCHEMYSTORE_POOLCLASS = _EnvironmentVariable(
"MLFLOW_SQLALCHEMYSTORE_POOLCLASS", str, None
)
#: Specifies the ``timeout_seconds`` for MLflow Model dependency inference operations.
#: (default: ``120``)
MLFLOW_REQUIREMENTS_INFERENCE_TIMEOUT = _EnvironmentVariable(
"MLFLOW_REQUIREMENTS_INFERENCE_TIMEOUT", int, 120
)
#: Specifies the MLflow Model Scoring server request timeout in seconds
#: (default: ``60``)
MLFLOW_SCORING_SERVER_REQUEST_TIMEOUT = _EnvironmentVariable(
"MLFLOW_SCORING_SERVER_REQUEST_TIMEOUT", int, 60
)
#: (Experimental, may be changed or removed)
#: Specifies the timeout to use when uploading or downloading a file
#: (default: ``None``). If None, individual artifact stores will choose defaults.
MLFLOW_ARTIFACT_UPLOAD_DOWNLOAD_TIMEOUT = _EnvironmentVariable(
"MLFLOW_ARTIFACT_UPLOAD_DOWNLOAD_TIMEOUT", int, None
)
#: Specifies the timeout for model inference with input example(s) when logging/saving a model.
#: MLflow runs a few inference requests against the model to infer model signature and pip
#: requirements. Sometimes the prediction hangs for a long time, especially for a large model.
#: This timeout limits the allowable time for performing a prediction for signature inference
#: and will abort the prediction, falling back to the default signature and pip requirements.
MLFLOW_INPUT_EXAMPLE_INFERENCE_TIMEOUT = _EnvironmentVariable(
"MLFLOW_INPUT_EXAMPLE_INFERENCE_TIMEOUT", int, 180
)
#: Specifies the device intended for use in the predict function - can be used
#: to override behavior where the GPU is used by default when available by
#: setting this environment variable to be ``cpu``. Currently, this
#: variable is only supported for the MLflow PyTorch and HuggingFace flavors.
#: For the HuggingFace flavor, note that device must be parseable as an integer.
MLFLOW_DEFAULT_PREDICTION_DEVICE = _EnvironmentVariable(
"MLFLOW_DEFAULT_PREDICTION_DEVICE", str, None
)
#: Specifies to Huggingface whether to use the automatic device placement logic of
# HuggingFace accelerate. If it's set to false, the low_cpu_mem_usage flag will not be
# set to True and device_map will not be set to "auto".
MLFLOW_HUGGINGFACE_DISABLE_ACCELERATE_FEATURES = _BooleanEnvironmentVariable(
"MLFLOW_DISABLE_HUGGINGFACE_ACCELERATE_FEATURES", False
)
#: Specifies to Huggingface whether to use the automatic device placement logic of
# HuggingFace accelerate. If it's set to false, the low_cpu_mem_usage flag will not be
# set to True and device_map will not be set to "auto". Default to False.
MLFLOW_HUGGINGFACE_USE_DEVICE_MAP = _BooleanEnvironmentVariable(
"MLFLOW_HUGGINGFACE_USE_DEVICE_MAP", False
)
#: Specifies to Huggingface to use the automatic device placement logic of HuggingFace accelerate.
#: This can be set to values supported by the version of HuggingFace Accelerate being installed.
MLFLOW_HUGGINGFACE_DEVICE_MAP_STRATEGY = _EnvironmentVariable(
"MLFLOW_HUGGINGFACE_DEVICE_MAP_STRATEGY", str, "auto"
)
#: Specifies to Huggingface to use the low_cpu_mem_usage flag powered by HuggingFace accelerate.
#: If it's set to false, the low_cpu_mem_usage flag will be set to False.
MLFLOW_HUGGINGFACE_USE_LOW_CPU_MEM_USAGE = _BooleanEnvironmentVariable(
"MLFLOW_HUGGINGFACE_USE_LOW_CPU_MEM_USAGE", True
)
#: Specifies the max_shard_size to use when mlflow transformers flavor saves the model checkpoint.
#: This can be set to override the 500MB default.
MLFLOW_HUGGINGFACE_MODEL_MAX_SHARD_SIZE = _EnvironmentVariable(
"MLFLOW_HUGGINGFACE_MODEL_MAX_SHARD_SIZE", str, "500MB"
)
#: Specifies the name of the Databricks secret scope to use for storing OpenAI API keys.
MLFLOW_OPENAI_SECRET_SCOPE = _EnvironmentVariable("MLFLOW_OPENAI_SECRET_SCOPE", str, None)
#: (Experimental, may be changed or removed)
#: Specifies the download options to be used by pip wheel when `add_libraries_to_model` is used to
#: create and log model dependencies as model artifacts. The default behavior only uses dependency
#: binaries and no source packages.
#: (default: ``--only-binary=:all:``).
MLFLOW_WHEELED_MODEL_PIP_DOWNLOAD_OPTIONS = _EnvironmentVariable(
"MLFLOW_WHEELED_MODEL_PIP_DOWNLOAD_OPTIONS", str, "--only-binary=:all:"
)
# Specifies whether or not to use multipart download when downloading a large file on Databricks.
MLFLOW_ENABLE_MULTIPART_DOWNLOAD = _BooleanEnvironmentVariable(
"MLFLOW_ENABLE_MULTIPART_DOWNLOAD", True
)
# Specifies whether or not to use multipart upload when uploading large artifacts.
MLFLOW_ENABLE_MULTIPART_UPLOAD = _BooleanEnvironmentVariable("MLFLOW_ENABLE_MULTIPART_UPLOAD", True)
#: Specifies whether or not to use multipart upload for proxied artifact access.
#: (default: ``False``)
MLFLOW_ENABLE_PROXY_MULTIPART_UPLOAD = _BooleanEnvironmentVariable(
"MLFLOW_ENABLE_PROXY_MULTIPART_UPLOAD", False
)
#: Private environment variable that's set to ``True`` while running tests.
_MLFLOW_TESTING = _BooleanEnvironmentVariable("MLFLOW_TESTING", False)
#: Specifies the username used to authenticate with a tracking server.
#: (default: ``None``)
MLFLOW_TRACKING_USERNAME = _EnvironmentVariable("MLFLOW_TRACKING_USERNAME", str, None)
#: Specifies the password used to authenticate with a tracking server.
#: (default: ``None``)
MLFLOW_TRACKING_PASSWORD = _EnvironmentVariable("MLFLOW_TRACKING_PASSWORD", str, None)
#: Specifies and takes precedence for setting the basic/bearer auth on http requests.
#: (default: ``None``)
MLFLOW_TRACKING_TOKEN = _EnvironmentVariable("MLFLOW_TRACKING_TOKEN", str, None)
#: Specifies whether to verify TLS connection in ``requests.request`` function,
#: see https://requests.readthedocs.io/en/master/api/
#: (default: ``False``).
MLFLOW_TRACKING_INSECURE_TLS = _BooleanEnvironmentVariable("MLFLOW_TRACKING_INSECURE_TLS", False)
#: Sets the ``verify`` param in ``requests.request`` function,
#: see https://requests.readthedocs.io/en/master/api/
#: (default: ``None``)
MLFLOW_TRACKING_SERVER_CERT_PATH = _EnvironmentVariable(
"MLFLOW_TRACKING_SERVER_CERT_PATH", str, None
)
#: Sets the ``cert`` param in ``requests.request`` function,
#: see https://requests.readthedocs.io/en/master/api/
#: (default: ``None``)
MLFLOW_TRACKING_CLIENT_CERT_PATH = _EnvironmentVariable(
"MLFLOW_TRACKING_CLIENT_CERT_PATH", str, None
)
#: Specified the ID of the run to log data to.
#: (default: ``None``)
MLFLOW_RUN_ID = _EnvironmentVariable("MLFLOW_RUN_ID", str, None)
#: Specifies the default root directory for tracking `FileStore`.
#: (default: ``None``)
MLFLOW_TRACKING_DIR = _EnvironmentVariable("MLFLOW_TRACKING_DIR", str, None)
#: Specifies the default root directory for registry `FileStore`.
#: (default: ``None``)
MLFLOW_REGISTRY_DIR = _EnvironmentVariable("MLFLOW_REGISTRY_DIR", str, None)
#: Specifies the default experiment ID to create run to.
#: (default: ``None``)
MLFLOW_EXPERIMENT_ID = _EnvironmentVariable("MLFLOW_EXPERIMENT_ID", str, None)
#: Specifies the default experiment name to create run to.
#: (default: ``None``)
MLFLOW_EXPERIMENT_NAME = _EnvironmentVariable("MLFLOW_EXPERIMENT_NAME", str, None)
#: Specified the path to the configuration file for MLflow Authentication.
#: (default: ``None``)
MLFLOW_AUTH_CONFIG_PATH = _EnvironmentVariable("MLFLOW_AUTH_CONFIG_PATH", str, None)
#: Specifies and takes precedence for setting the UC OSS basic/bearer auth on http requests.
#: (default: ``None``)
MLFLOW_UC_OSS_TOKEN = _EnvironmentVariable("MLFLOW_UC_OSS_TOKEN", str, None)
#: Specifies the root directory to create Python virtual environments in.
#: (default: ``~/.mlflow/envs``)
MLFLOW_ENV_ROOT = _EnvironmentVariable(
"MLFLOW_ENV_ROOT", str, str(Path.home().joinpath(".mlflow", "envs"))
)
#: Specifies whether or not to use DBFS FUSE mount to store artifacts on Databricks
#: (default: ``False``)
MLFLOW_ENABLE_DBFS_FUSE_ARTIFACT_REPO = _BooleanEnvironmentVariable(
"MLFLOW_ENABLE_DBFS_FUSE_ARTIFACT_REPO", True
)
#: Specifies whether or not to use UC Volume FUSE mount to store artifacts on Databricks
#: (default: ``True``)
MLFLOW_ENABLE_UC_VOLUME_FUSE_ARTIFACT_REPO = _BooleanEnvironmentVariable(
"MLFLOW_ENABLE_UC_VOLUME_FUSE_ARTIFACT_REPO", True
)
#: Private environment variable that should be set to ``True`` when running autologging tests.
#: (default: ``False``)
_MLFLOW_AUTOLOGGING_TESTING = _BooleanEnvironmentVariable("MLFLOW_AUTOLOGGING_TESTING", False)
#: (Experimental, may be changed or removed)
#: Specifies the uri of a MLflow Gateway Server instance to be used with the Gateway Client APIs
#: (default: ``None``)
MLFLOW_GATEWAY_URI = _EnvironmentVariable("MLFLOW_GATEWAY_URI", str, None)
#: (Experimental, may be changed or removed)
#: Specifies the uri of an MLflow AI Gateway instance to be used with the Deployments
#: Client APIs
#: (default: ``None``)
MLFLOW_DEPLOYMENTS_TARGET = _EnvironmentVariable("MLFLOW_DEPLOYMENTS_TARGET", str, None)
#: Specifies the path of the config file for MLflow AI Gateway.
#: (default: ``None``)
MLFLOW_GATEWAY_CONFIG = _EnvironmentVariable("MLFLOW_GATEWAY_CONFIG", str, None)
#: Specifies the path of the config file for MLflow AI Gateway.
#: (default: ``None``)
MLFLOW_DEPLOYMENTS_CONFIG = _EnvironmentVariable("MLFLOW_DEPLOYMENTS_CONFIG", str, None)
#: Specifies whether to display the progress bar when uploading/downloading artifacts.
#: (default: ``True``)
MLFLOW_ENABLE_ARTIFACTS_PROGRESS_BAR = _BooleanEnvironmentVariable(
"MLFLOW_ENABLE_ARTIFACTS_PROGRESS_BAR", True
)
#: Specifies the conda home directory to use.
#: (default: ``conda``)
MLFLOW_CONDA_HOME = _EnvironmentVariable("MLFLOW_CONDA_HOME", str, None)
#: Specifies the name of the command to use when creating the environments.
#: For example, let's say we want to use mamba (https://github.com/mamba-org/mamba)
#: instead of conda to create environments.
#: Then: > conda install mamba -n base -c conda-forge
#: If not set, use the same as conda_path
#: (default: ``conda``)
MLFLOW_CONDA_CREATE_ENV_CMD = _EnvironmentVariable("MLFLOW_CONDA_CREATE_ENV_CMD", str, "conda")
#: Specifies the flavor to serve in the scoring server.
#: (default ``None``)
MLFLOW_DEPLOYMENT_FLAVOR_NAME = _EnvironmentVariable("MLFLOW_DEPLOYMENT_FLAVOR_NAME", str, None)
#: Specifies the MLflow Run context
#: (default: ``None``)
MLFLOW_RUN_CONTEXT = _EnvironmentVariable("MLFLOW_RUN_CONTEXT", str, None)
#: Specifies the URL of the ECR-hosted Docker image a model is deployed into for SageMaker.
# (default: ``None``)
MLFLOW_SAGEMAKER_DEPLOY_IMG_URL = _EnvironmentVariable("MLFLOW_SAGEMAKER_DEPLOY_IMG_URL", str, None)
#: Specifies whether to disable creating a new conda environment for `mlflow models build-docker`.
#: (default: ``False``)
MLFLOW_DISABLE_ENV_CREATION = _BooleanEnvironmentVariable("MLFLOW_DISABLE_ENV_CREATION", False)
#: Specifies the timeout value for downloading chunks of mlflow artifacts.
#: (default: ``300``)
MLFLOW_DOWNLOAD_CHUNK_TIMEOUT = _EnvironmentVariable("MLFLOW_DOWNLOAD_CHUNK_TIMEOUT", int, 300)
#: Specifies if system metrics logging should be enabled.
MLFLOW_ENABLE_SYSTEM_METRICS_LOGGING = _BooleanEnvironmentVariable(
"MLFLOW_ENABLE_SYSTEM_METRICS_LOGGING", False
)
#: Specifies the sampling interval for system metrics logging.
MLFLOW_SYSTEM_METRICS_SAMPLING_INTERVAL = _EnvironmentVariable(
"MLFLOW_SYSTEM_METRICS_SAMPLING_INTERVAL", float, None
)
#: Specifies the number of samples before logging system metrics.
MLFLOW_SYSTEM_METRICS_SAMPLES_BEFORE_LOGGING = _EnvironmentVariable(
"MLFLOW_SYSTEM_METRICS_SAMPLES_BEFORE_LOGGING", int, None
)
#: Specifies the node id of system metrics logging. This is useful in multi-node (distributed
#: training) setup.
MLFLOW_SYSTEM_METRICS_NODE_ID = _EnvironmentVariable("MLFLOW_SYSTEM_METRICS_NODE_ID", str, None)
# Private environment variable to specify the number of chunk download retries for multipart
# download.
_MLFLOW_MPD_NUM_RETRIES = _EnvironmentVariable("_MLFLOW_MPD_NUM_RETRIES", int, 3)
# Private environment variable to specify the interval between chunk download retries for multipart
# download.
_MLFLOW_MPD_RETRY_INTERVAL_SECONDS = _EnvironmentVariable(
"_MLFLOW_MPD_RETRY_INTERVAL_SECONDS", int, 1
)
#: Specifies the minimum file size in bytes to use multipart upload when logging artifacts
#: (default: ``524_288_000`` (500 MB))
MLFLOW_MULTIPART_UPLOAD_MINIMUM_FILE_SIZE = _EnvironmentVariable(
"MLFLOW_MULTIPART_UPLOAD_MINIMUM_FILE_SIZE", int, 500 * 1024**2
)
#: Specifies the minimum file size in bytes to use multipart download when downloading artifacts
#: (default: ``524_288_000`` (500 MB))
MLFLOW_MULTIPART_DOWNLOAD_MINIMUM_FILE_SIZE = _EnvironmentVariable(
"MLFLOW_MULTIPART_DOWNLOAD_MINIMUM_FILE_SIZE", int, 500 * 1024**2
)
#: Specifies the chunk size in bytes to use when performing multipart upload
#: (default: ``104_857_60`` (10 MB))
MLFLOW_MULTIPART_UPLOAD_CHUNK_SIZE = _EnvironmentVariable(
"MLFLOW_MULTIPART_UPLOAD_CHUNK_SIZE", int, 10 * 1024**2
)
#: Specifies the chunk size in bytes to use when performing multipart download
#: (default: ``104_857_600`` (100 MB))
MLFLOW_MULTIPART_DOWNLOAD_CHUNK_SIZE = _EnvironmentVariable(
"MLFLOW_MULTIPART_DOWNLOAD_CHUNK_SIZE", int, 100 * 1024**2
)
#: Specifies whether or not to allow the MLflow server to follow redirects when
#: making HTTP requests. If set to False, the server will throw an exception if it
#: encounters a redirect response.
#: (default: ``True``)
MLFLOW_ALLOW_HTTP_REDIRECTS = _BooleanEnvironmentVariable("MLFLOW_ALLOW_HTTP_REDIRECTS", True)
#: Timeout for a SINGLE HTTP request to a deployment endpoint (in seconds).
#: This controls how long ONE individual predict/predict_stream request can take before timing out.
#: If your model inference takes longer than this (e.g., long-running agent queries that take
#: several minutes), you MUST increase this value to allow the single request to complete.
#: For example, if your longest query takes 5 minutes, set this to at least 300 seconds.
#: Used within the `predict` and `predict_stream` APIs.
#: (default: ``120``)
MLFLOW_DEPLOYMENT_PREDICT_TIMEOUT = _EnvironmentVariable(
"MLFLOW_DEPLOYMENT_PREDICT_TIMEOUT", int, 120
)
#: TOTAL time limit for ALL retry attempts combined (in seconds).
#: This controls how long the client will keep retrying failed requests across ALL attempts
#: before giving up entirely. This is SEPARATE from MLFLOW_DEPLOYMENT_PREDICT_TIMEOUT, which
#: controls how long a SINGLE request can run, while this variable controls the TOTAL time
#: for ALL retries. For long-running operations that may also experience transient failures,
#: ensure BOTH timeouts are set appropriately. This value should be greater than or equal to
#: MLFLOW_DEPLOYMENT_PREDICT_TIMEOUT.
#: (default: ``600``)
MLFLOW_DEPLOYMENT_PREDICT_TOTAL_TIMEOUT = _EnvironmentVariable(
"MLFLOW_DEPLOYMENT_PREDICT_TOTAL_TIMEOUT", int, 600
)
MLFLOW_GATEWAY_RATE_LIMITS_STORAGE_URI = _EnvironmentVariable(
"MLFLOW_GATEWAY_RATE_LIMITS_STORAGE_URI", str, None
)
#: If True, MLflow fluent logging APIs, e.g., `mlflow.log_metric` will log asynchronously.
MLFLOW_ENABLE_ASYNC_LOGGING = _BooleanEnvironmentVariable("MLFLOW_ENABLE_ASYNC_LOGGING", False)
#: Number of workers in the thread pool used for asynchronous logging, defaults to 10.
MLFLOW_ASYNC_LOGGING_THREADPOOL_SIZE = _EnvironmentVariable(
"MLFLOW_ASYNC_LOGGING_THREADPOOL_SIZE", int, 10
)
#: Specifies whether or not to have mlflow configure logging on import.
#: If set to True, mlflow will configure ``mlflow.<module_name>`` loggers with
#: logging handlers and formatters.
#: (default: ``True``)
MLFLOW_CONFIGURE_LOGGING = _BooleanEnvironmentVariable("MLFLOW_CONFIGURE_LOGGING", True)
#: If set to True, the following entities will be truncated to their maximum length:
#: - Param value
#: - Tag value
#: If set to False, an exception will be raised if the length of the entity exceeds the maximum
#: length.
#: (default: ``True``)
MLFLOW_TRUNCATE_LONG_VALUES = _BooleanEnvironmentVariable("MLFLOW_TRUNCATE_LONG_VALUES", True)
# Whether to run slow tests with pytest. Default to False in normal runs,
# but set to True in the weekly slow test jobs.
_MLFLOW_RUN_SLOW_TESTS = _BooleanEnvironmentVariable("MLFLOW_RUN_SLOW_TESTS", False)
#: The OpenJDK version to install in the Docker image used for MLflow models.
#: (default: ``11``)
MLFLOW_DOCKER_OPENJDK_VERSION = _EnvironmentVariable("MLFLOW_DOCKER_OPENJDK_VERSION", str, "11")
#: How long a trace can be "in-progress". When this is set to a positive value and a trace is
#: not completed within this time, it will be automatically halted and exported to the specified
#: backend destination with status "ERROR".
MLFLOW_TRACE_TIMEOUT_SECONDS = _EnvironmentVariable("MLFLOW_TRACE_TIMEOUT_SECONDS", int, None)
#: How frequently to check for timed-out traces. For example, if this is set to 10, MLflow will
#: check for timed-out traces every 10 seconds (in a background worker) and halt any traces that
#: have exceeded the timeout. This is only effective if MLFLOW_TRACE_TIMEOUT_SECONDS is set to a
#: positive value.
MLFLOW_TRACE_TIMEOUT_CHECK_INTERVAL_SECONDS = _EnvironmentVariable(
"MLFLOW_TRACE_TIMEOUT_CHECK_INTERVAL_SECONDS", int, 1
)
# How long a trace can be buffered in-memory at client side before being abandoned.
MLFLOW_TRACE_BUFFER_TTL_SECONDS = _EnvironmentVariable("MLFLOW_TRACE_BUFFER_TTL_SECONDS", int, 3600)
# How many traces to be buffered in-memory at client side before being abandoned.
MLFLOW_TRACE_BUFFER_MAX_SIZE = _EnvironmentVariable("MLFLOW_TRACE_BUFFER_MAX_SIZE", int, 1000)
#: Maximum number of prompt versions to cache in the LRU cache for _load_prompt_version_cached.
#: This cache improves performance by avoiding repeated network calls for the same prompt version.
#: (default: ``128``)
MLFLOW_PROMPT_CACHE_MAX_SIZE = _EnvironmentVariable("MLFLOW_PROMPT_CACHE_MAX_SIZE", int, 128)
#: Private configuration option.
#: Enables the ability to catch exceptions within MLflow evaluate for classification models
#: where a class imbalance due to a missing target class would raise an error in the
#: underlying metrology modules (scikit-learn). If set to True, specific exceptions will be
#: caught, alerted via the warnings module, and evaluation will resume.
#: (default: ``False``)
_MLFLOW_EVALUATE_SUPPRESS_CLASSIFICATION_ERRORS = _BooleanEnvironmentVariable(
"_MLFLOW_EVALUATE_SUPPRESS_CLASSIFICATION_ERRORS", False
)
#: Maximum number of workers to use for running model prediction and scoring during
#: for each row in the dataset passed to the `mlflow.genai.evaluate` function.
#: (default: ``10``)
MLFLOW_GENAI_EVAL_MAX_WORKERS = _EnvironmentVariable("MLFLOW_GENAI_EVAL_MAX_WORKERS", int, 10)
#: Skip trace validation during GenAI evaluation. By default (False), MLflow will validate if
#: the given predict function generates a valid trace, and otherwise wraps it with @mlflow.trace
#: decorator to make sure a trace is generated. This validation requires running a single
#: prediction. When you are sure that the predict function generates a trace, set this to True
#: to skip the validation and save the time of running a single prediction.
MLFLOW_GENAI_EVAL_SKIP_TRACE_VALIDATION = _BooleanEnvironmentVariable(
"MLFLOW_GENAI_EVAL_SKIP_TRACE_VALIDATION", False
)
#: Enable tracing for evaluation scorers. By default (False), MLflow will not trace the scorer
#: function calls. To trace the scorer functions for debugging purpose, set this to True.
MLFLOW_GENAI_EVAL_ENABLE_SCORER_TRACING = _BooleanEnvironmentVariable(
"MLFLOW_GENAI_EVAL_ENABLE_SCORER_TRACING", False
)
#: Whether to warn (default) or raise (opt-in) for unresolvable requirements inference for
#: a model's dependency inference. If set to True, an exception will be raised if requirements
#: inference or the process of capturing imported modules encounters any errors.
MLFLOW_REQUIREMENTS_INFERENCE_RAISE_ERRORS = _BooleanEnvironmentVariable(
"MLFLOW_REQUIREMENTS_INFERENCE_RAISE_ERRORS", False
)
# How many traces to display in Databricks Notebooks
MLFLOW_MAX_TRACES_TO_DISPLAY_IN_NOTEBOOK = _EnvironmentVariable(
"MLFLOW_MAX_TRACES_TO_DISPLAY_IN_NOTEBOOK", int, 10
)
#: Specifies the sampling ratio for traces. Value should be between 0.0 and 1.0.
#: A value of 1.0 means all traces are sampled (default behavior).
#: A value of 0.5 means 50% of traces are sampled.
#: A value of 0.0 means no traces are sampled.
#: (default: ``1.0``)
MLFLOW_TRACE_SAMPLING_RATIO = _EnvironmentVariable("MLFLOW_TRACE_SAMPLING_RATIO", float, 1.0)
#: When OTel export is configured and this is set to true, MLflow will write spans to BOTH
#: MLflow Tracking Server and OpenTelemetry Collector. When false (default), OTel export
#: replaces MLflow export.
#: (default: ``False``)
MLFLOW_TRACE_ENABLE_OTLP_DUAL_EXPORT = _BooleanEnvironmentVariable(
"MLFLOW_TRACE_ENABLE_OTLP_DUAL_EXPORT", False
)
#: Controls whether MLflow should export traces to OTLP endpoint when
#: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT is set. This allows users to disable MLflow's OTLP
#: export even when the OTEL endpoint is configured for other telemetry clients.
#: (default: ``True``)
MLFLOW_ENABLE_OTLP_EXPORTER = _BooleanEnvironmentVariable("MLFLOW_ENABLE_OTLP_EXPORTER", True)
#: By default, MLflow uses an isolated TracerProvider instance to generate traces, instead of the
#: OpenTelemetry's singleton TracerProvider. Set this to False to let MLflow share the same OTel
# TracerProvider and allow mixing MLflow SDK and Otel SDK to generate a single trace.
#: (default: ``True``)
MLFLOW_USE_DEFAULT_TRACER_PROVIDER = _BooleanEnvironmentVariable(
"MLFLOW_USE_DEFAULT_TRACER_PROVIDER", True
)
# Default addressing style to use for boto client
MLFLOW_BOTO_CLIENT_ADDRESSING_STYLE = _EnvironmentVariable(
"MLFLOW_BOTO_CLIENT_ADDRESSING_STYLE", str, "auto"
)
#: Specify the timeout in seconds for Databricks endpoint HTTP request retries.
MLFLOW_DATABRICKS_ENDPOINT_HTTP_RETRY_TIMEOUT = _EnvironmentVariable(
"MLFLOW_DATABRICKS_ENDPOINT_HTTP_RETRY_TIMEOUT", int, 500
)
#: Specifies the number of connection pools to cache in urllib3. This environment variable sets the
#: `pool_connections` parameter in the `requests.adapters.HTTPAdapter` constructor. By adjusting
#: this variable, users can enhance the concurrency of HTTP requests made by MLflow.
MLFLOW_HTTP_POOL_CONNECTIONS = _EnvironmentVariable("MLFLOW_HTTP_POOL_CONNECTIONS", int, 10)
#: Specifies the maximum number of connections to keep in the HTTP connection pool. This environment
#: variable sets the `pool_maxsize` parameter in the `requests.adapters.HTTPAdapter` constructor.
#: By adjusting this variable, users can enhance the concurrency of HTTP requests made by MLflow.
MLFLOW_HTTP_POOL_MAXSIZE = _EnvironmentVariable("MLFLOW_HTTP_POOL_MAXSIZE", int, 10)
#: Enable Unity Catalog integration for MLflow AI Gateway.
#: (default: ``False``)
MLFLOW_ENABLE_UC_FUNCTIONS = _BooleanEnvironmentVariable("MLFLOW_ENABLE_UC_FUNCTIONS", False)
#: Specifies the length of time in seconds for the asynchronous logging thread to wait before
#: logging a batch.
MLFLOW_ASYNC_LOGGING_BUFFERING_SECONDS = _EnvironmentVariable(
"MLFLOW_ASYNC_LOGGING_BUFFERING_SECONDS", int, None
)
#: Whether to enable Databricks SDK. If true, MLflow uses databricks-sdk to send HTTP requests
#: to Databricks endpoint, otherwise MLflow uses ``requests`` library to send HTTP requests
#: to Databricks endpoint. Note that if you want to use OAuth authentication, you have to
#: set this environment variable to true.
#: (default: ``True``)
MLFLOW_ENABLE_DB_SDK = _BooleanEnvironmentVariable("MLFLOW_ENABLE_DB_SDK", True)
#: A flag that's set to 'true' in the child process for capturing modules.
_MLFLOW_IN_CAPTURE_MODULE_PROCESS = _BooleanEnvironmentVariable(
"MLFLOW_IN_CAPTURE_MODULE_PROCESS", False
)
#: Use DatabricksSDKModelsArtifactRepository when registering and loading models to and from
#: Databricks UC. This is required for SEG(Secure Egress Gateway) enabled workspaces and helps
#: eliminate models exfiltration risk associated with temporary scoped token generation used in
#: existing model artifact repo classes.
MLFLOW_USE_DATABRICKS_SDK_MODEL_ARTIFACTS_REPO_FOR_UC = _BooleanEnvironmentVariable(
"MLFLOW_USE_DATABRICKS_SDK_MODEL_ARTIFACTS_REPO_FOR_UC", False
)
#: Disable Databricks SDK for run artifacts. We enable this by default since we want to
#: use Databricks SDK for run artifacts in most cases, but this gives us a way to disable
#: it for certain cases if needed.
MLFLOW_DISABLE_DATABRICKS_SDK_FOR_RUN_ARTIFACTS = _BooleanEnvironmentVariable(
"MLFLOW_DISABLE_DATABRICKS_SDK_FOR_RUN_ARTIFACTS", False
)
#: Skip signature validation check when migrating model versions from Databricks Workspace
#: Model Registry to Databricks Unity Catalog Model Registry.
#: (default: ``False``)
MLFLOW_SKIP_SIGNATURE_CHECK_FOR_UC_REGISTRY_MIGRATION = _BooleanEnvironmentVariable(
"MLFLOW_SKIP_SIGNATURE_CHECK_FOR_UC_REGISTRY_MIGRATION", False
)
# Specifies the model environment archive file downloading path when using
# ``mlflow.pyfunc.spark_udf``. (default: ``None``)
MLFLOW_MODEL_ENV_DOWNLOADING_TEMP_DIR = _EnvironmentVariable(
"MLFLOW_MODEL_ENV_DOWNLOADING_TEMP_DIR", str, None
)
# Specifies whether to log environment variable names used during model logging.
MLFLOW_RECORD_ENV_VARS_IN_MODEL_LOGGING = _BooleanEnvironmentVariable(
"MLFLOW_RECORD_ENV_VARS_IN_MODEL_LOGGING", True
)
#: Specifies the artifact compression method used when logging a model
#: allowed values are "lzma", "bzip2" and "gzip"
#: (default: ``None``, indicating no compression)
MLFLOW_LOG_MODEL_COMPRESSION = _EnvironmentVariable("MLFLOW_LOG_MODEL_COMPRESSION", str, None)
# Specifies whether to convert a {"messages": [{"role": "...", "content": "..."}]} input
# to a List[BaseMessage] object when invoking a PyFunc model saved with langchain flavor.
# This takes precedence over the default behavior of trying such conversion if the model
# is not an AgentExecutor and the input schema doesn't contain a 'messages' field.
MLFLOW_CONVERT_MESSAGES_DICT_FOR_LANGCHAIN = _BooleanEnvironmentVariable(
"MLFLOW_CONVERT_MESSAGES_DICT_FOR_LANGCHAIN", None
)
#: A boolean flag which enables additional functionality in Python tests for GO backend.
_MLFLOW_GO_STORE_TESTING = _BooleanEnvironmentVariable("MLFLOW_GO_STORE_TESTING", False)
# Specifies whether the current environment is a serving environment.
# This should only be used internally by MLflow to add some additional logic when running in a
# serving environment.
_MLFLOW_IS_IN_SERVING_ENVIRONMENT = _BooleanEnvironmentVariable(
"_MLFLOW_IS_IN_SERVING_ENVIRONMENT", None
)
#: Secret key for the Flask app. This is necessary for enabling CSRF protection
#: in the UI signup page when running the app with basic authentication enabled
MLFLOW_FLASK_SERVER_SECRET_KEY = _EnvironmentVariable("MLFLOW_FLASK_SERVER_SECRET_KEY", str, None)
#: (MLflow 3.5.0+) Comma-separated list of allowed CORS origins for the MLflow server.
#: Example: "http://localhost:3000,https://app.example.com"
#: Use "*" to allow ALL origins (DANGEROUS - only use for development!).
#: (default: ``None`` - localhost origins only)
MLFLOW_SERVER_CORS_ALLOWED_ORIGINS = _EnvironmentVariable(
"MLFLOW_SERVER_CORS_ALLOWED_ORIGINS", str, None
)
#: (MLflow 3.5.0+) Comma-separated list of allowed Host headers for the MLflow server.
#: Example: "mlflow.company.com,mlflow.internal:5000"
#: Use "*" to allow ALL hosts (not recommended for production).
#: If not set, defaults to localhost variants and private IP ranges.
#: (default: ``None`` - localhost and private IP ranges)
MLFLOW_SERVER_ALLOWED_HOSTS = _EnvironmentVariable("MLFLOW_SERVER_ALLOWED_HOSTS", str, None)
#: (MLflow 3.5.0+) Disable all security middleware (DANGEROUS - only use for testing!).
#: Set to "true" to disable security headers, CORS protection, and host validation.
#: (default: ``"false"``)
MLFLOW_SERVER_DISABLE_SECURITY_MIDDLEWARE = _EnvironmentVariable(
"MLFLOW_SERVER_DISABLE_SECURITY_MIDDLEWARE", str, "false"
)
#: (MLflow 3.5.0+) X-Frame-Options header value for clickjacking protection.
#: Options: "SAMEORIGIN" (default), "DENY", or "NONE" (disable).
#: Set to "NONE" to allow embedding MLflow UI in iframes from different origins.
#: (default: ``"SAMEORIGIN"``)
MLFLOW_SERVER_X_FRAME_OPTIONS = _EnvironmentVariable(
"MLFLOW_SERVER_X_FRAME_OPTIONS", str, "SAMEORIGIN"
)
#: Specifies the max length (in chars) of an experiment's artifact location.
#: The default is 2048.
MLFLOW_ARTIFACT_LOCATION_MAX_LENGTH = _EnvironmentVariable(
"MLFLOW_ARTIFACT_LOCATION_MAX_LENGTH", int, 2048
)
#: Path to SSL CA certificate file for MySQL connections
#: Used when creating a SQLAlchemy engine for MySQL
#: (default: ``None``)
MLFLOW_MYSQL_SSL_CA = _EnvironmentVariable("MLFLOW_MYSQL_SSL_CA", str, None)
#: Path to SSL certificate file for MySQL connections
#: Used when creating a SQLAlchemy engine for MySQL
#: (default: ``None``)
MLFLOW_MYSQL_SSL_CERT = _EnvironmentVariable("MLFLOW_MYSQL_SSL_CERT", str, None)
#: Path to SSL key file for MySQL connections
#: Used when creating a SQLAlchemy engine for MySQL
#: (default: ``None``)
MLFLOW_MYSQL_SSL_KEY = _EnvironmentVariable("MLFLOW_MYSQL_SSL_KEY", str, None)
#: Specifies the Databricks traffic ID to inject as x-databricks-traffic-id header
#: in HTTP requests to Databricks endpoints
#: (default: ``None``)
_MLFLOW_DATABRICKS_TRAFFIC_ID = _EnvironmentVariable("MLFLOW_DATABRICKS_TRAFFIC_ID", str, None)
#######################################################################################
# Tracing
#######################################################################################
#: Specifies whether to enable async trace logging to Databricks Tracing Server.
#: TODO: Update OSS MLflow Server to logging async by default
#: Default: ``True``.
MLFLOW_ENABLE_ASYNC_TRACE_LOGGING = _BooleanEnvironmentVariable(
"MLFLOW_ENABLE_ASYNC_TRACE_LOGGING", True
)
#: Maximum number of worker threads to use for async trace logging.
#: (default: ``10``)
MLFLOW_ASYNC_TRACE_LOGGING_MAX_WORKERS = _EnvironmentVariable(
"MLFLOW_ASYNC_TRACE_LOGGING_MAX_WORKERS", int, 10
)
#: Maximum number of export tasks to queue for async trace logging.
#: When the queue is full, new export tasks will be dropped.
#: (default: ``1000``)
MLFLOW_ASYNC_TRACE_LOGGING_MAX_QUEUE_SIZE = _EnvironmentVariable(
"MLFLOW_ASYNC_TRACE_LOGGING_MAX_QUEUE_SIZE", int, 1000
)
#: Timeout seconds for retrying trace logging.
#: (default: ``500``)
MLFLOW_ASYNC_TRACE_LOGGING_RETRY_TIMEOUT = _EnvironmentVariable(
"MLFLOW_ASYNC_TRACE_LOGGING_RETRY_TIMEOUT", int, 500
)
#: Specifies the SQL warehouse ID to use for tracing with Databricks backend.
#: (default: ``None``)
MLFLOW_TRACING_SQL_WAREHOUSE_ID = _EnvironmentVariable("MLFLOW_TRACING_SQL_WAREHOUSE_ID", str, None)
#: Specifies the location to send traces to. This can be either an MLflow experiment ID or a
#: Databricks Unity Catalog (UC) schema (format: `<catalog_name>.<schema_name>`).
#: (default: ``None`` (an active MLflow experiment will be used))
MLFLOW_TRACING_DESTINATION = _EnvironmentVariable("MLFLOW_TRACING_DESTINATION", str, None)
#######################################################################################
# Model Logging
#######################################################################################
#: The default active LoggedModel ID. Traces created while this variable is set (unless overridden,
#: e.g., by the `set_active_model()` API) will be associated with this LoggedModel ID.
#: (default: ``None``)
MLFLOW_ACTIVE_MODEL_ID = _EnvironmentVariable("MLFLOW_ACTIVE_MODEL_ID", str, None)
#: Legacy environment variable for setting the default active LoggedModel ID.
#: This should only by used by MLflow internally. Users should use the
#: public `MLFLOW_ACTIVE_MODEL_ID` environment variable or the `set_active_model`
#: API to set the active LoggedModel, and should not set this environment variable directly.
#: (default: ``None``)
_MLFLOW_ACTIVE_MODEL_ID = _EnvironmentVariable("_MLFLOW_ACTIVE_MODEL_ID", str, None)
#: Maximum number of parameters to include in the initial CreateLoggedModel request.
#: Additional parameters will be logged in separate requests.
#: (default: ``100``)
_MLFLOW_CREATE_LOGGED_MODEL_PARAMS_BATCH_SIZE = _EnvironmentVariable(
"_MLFLOW_CREATE_LOGGED_MODEL_PARAMS_BATCH_SIZE", int, 100
)
#: Maximum number of parameters to include in each batch when logging parameters
#: for a logged model.
#: (default: ``100``)
_MLFLOW_LOG_LOGGED_MODEL_PARAMS_BATCH_SIZE = _EnvironmentVariable(
"_MLFLOW_LOG_LOGGED_MODEL_PARAMS_BATCH_SIZE", int, 100
)
#: A boolean flag that enables printing URLs for logged and registered models when
#: they are created.
#: (default: ``True``)
MLFLOW_PRINT_MODEL_URLS_ON_CREATION = _BooleanEnvironmentVariable(
"MLFLOW_PRINT_MODEL_URLS_ON_CREATION", True
)
#: Maximum number of threads to use when downloading traces during search operations.
#: (default: ``max(32, (# of system CPUs * 4)``)
MLFLOW_SEARCH_TRACES_MAX_THREADS = _EnvironmentVariable(
# Threads used to download traces during search are network IO-bound (waiting for downloads)
# rather than CPU-bound, so we want more threads than CPU cores
"MLFLOW_SEARCH_TRACES_MAX_THREADS",
int,
max(32, (os.cpu_count() or 1) * 4),
)
#: Maximum number of traces to fetch in a single BatchGetTraces request during search operations.
#: (default: ``10``)
_MLFLOW_SEARCH_TRACES_MAX_BATCH_SIZE = _EnvironmentVariable(
"MLFLOW_SEARCH_TRACES_MAX_BATCH_SIZE", int, 10
)
_MLFLOW_DELETE_TRACES_MAX_BATCH_SIZE = _EnvironmentVariable(
"MLFLOW_DELETE_TRACES_MAX_BATCH_SIZE", int, 100
)
#: Specifies the logging level for MLflow. This can be set to any valid logging level
#: (e.g., "DEBUG", "INFO"). This environment must be set before importing mlflow to take
#: effect. To modify the logging level after importing mlflow, use `importlib.reload(mlflow)`.
#: (default: ``None``).
MLFLOW_LOGGING_LEVEL = _EnvironmentVariable("MLFLOW_LOGGING_LEVEL", str, None)
#: Avoid printing experiment and run url to stdout at run termination
#: (default: ``False``)
MLFLOW_SUPPRESS_PRINTING_URL_TO_STDOUT = _BooleanEnvironmentVariable(
"MLFLOW_SUPPRESS_PRINTING_URL_TO_STDOUT", False
)
#: If True, MLflow locks both direct and transitive model dependencies when logging a model.
#: (default: ``False``).
MLFLOW_LOCK_MODEL_DEPENDENCIES = _BooleanEnvironmentVariable(
"MLFLOW_LOCK_MODEL_DEPENDENCIES", False
)
#: If specified, tracking server rejects model `/mlflow/model-versions/create` requests with
#: a source that does not match the specified regular expression.
#: (default: ``None``).
MLFLOW_CREATE_MODEL_VERSION_SOURCE_VALIDATION_REGEX = _EnvironmentVariable(
"MLFLOW_CREATE_MODEL_VERSION_SOURCE_VALIDATION_REGEX", str, None
)
#: Maximum number of root fields to include in the MLflow server GraphQL request.
#: (default: ``10``)
MLFLOW_SERVER_GRAPHQL_MAX_ROOT_FIELDS = _EnvironmentVariable(
"MLFLOW_SERVER_GRAPHQL_MAX_ROOT_FIELDS", int, 10
)
#: Maximum number of aliases to include in the MLflow server GraphQL request.
#: (default: ``10``)
MLFLOW_SERVER_GRAPHQL_MAX_ALIASES = _EnvironmentVariable(
"MLFLOW_SERVER_GRAPHQL_MAX_ALIASES", int, 10
)
#: Whether to disable schema details in error messages for MLflow schema enforcement.
#: (default: ``False``)
MLFLOW_DISABLE_SCHEMA_DETAILS = _BooleanEnvironmentVariable("MLFLOW_DISABLE_SCHEMA_DETAILS", False)
def _split_strip(s: str) -> list[str]:
return [s.strip() for s in s.split(",")]
# Specifies the allowed schemes for MLflow webhook URLs.
# This environment variable is not intended for production use.
_MLFLOW_WEBHOOK_ALLOWED_SCHEMES = _EnvironmentVariable(
"MLFLOW_WEBHOOK_ALLOWED_SCHEMES", _split_strip, ["https"]
)
#: Specifies the secret key used to encrypt webhook secrets in MLflow.
MLFLOW_WEBHOOK_SECRET_ENCRYPTION_KEY = _EnvironmentVariable(
"MLFLOW_WEBHOOK_SECRET_ENCRYPTION_KEY", str, None
)
#: Specifies the timeout in seconds for webhook HTTP requests
#: (default: ``30``)
MLFLOW_WEBHOOK_REQUEST_TIMEOUT = _EnvironmentVariable("MLFLOW_WEBHOOK_REQUEST_TIMEOUT", int, 30)
#: Specifies the maximum number of threads for webhook delivery thread pool
#: (default: ``10``)
MLFLOW_WEBHOOK_DELIVERY_MAX_WORKERS = _EnvironmentVariable(
"MLFLOW_WEBHOOK_DELIVERY_MAX_WORKERS", int, 10
)
#: Specifies the maximum number of retries for webhook HTTP requests
#: (default: ``3``)
MLFLOW_WEBHOOK_REQUEST_MAX_RETRIES = _EnvironmentVariable(
"MLFLOW_WEBHOOK_REQUEST_MAX_RETRIES", int, 3
)
#: Specifies the TTL in seconds for webhook list cache
#: (default: ``60``)
MLFLOW_WEBHOOK_CACHE_TTL = _EnvironmentVariable("MLFLOW_WEBHOOK_CACHE_TTL", int, 60)
#: Whether to disable telemetry collection in MLflow. If set to True, no telemetry
#: data will be collected. (default: ``False``)
MLFLOW_DISABLE_TELEMETRY = _BooleanEnvironmentVariable("MLFLOW_DISABLE_TELEMETRY", False)
#: Internal flag to enable telemetry in mlflow tests.
#: (default: ``False``)
_MLFLOW_TESTING_TELEMETRY = _BooleanEnvironmentVariable("_MLFLOW_TESTING_TELEMETRY", False)
#: Internal environment variable to set the telemetry session id when TelemetryClient is initialized
#: This should never be set by users or explicitly.
#: (default: ``None``)
_MLFLOW_TELEMETRY_SESSION_ID = _EnvironmentVariable("_MLFLOW_TELEMETRY_SESSION_ID", str, None)
#: Internal flag to enable telemetry logging
#: (default: ``False``)
_MLFLOW_TELEMETRY_LOGGING = _BooleanEnvironmentVariable("_MLFLOW_TELEMETRY_LOGGING", False)
#: Internal environment variable to indicate which SGI is being used,
#: e.g. "uvicorn" or "gunicorn".
#: This should never be set by users or explicitly.
#: (default: ``None``)
_MLFLOW_SGI_NAME = _EnvironmentVariable("_MLFLOW_SGI_NAME", str, None)
#: Specifies whether to enforce using stdin scoring server in Spark udf.
#: (default: ``True``)
MLFLOW_ENFORCE_STDIN_SCORING_SERVER_FOR_SPARK_UDF = _BooleanEnvironmentVariable(
"MLFLOW_ENFORCE_STDIN_SCORING_SERVER_FOR_SPARK_UDF", True
)
#: Specifies whether to enable job execution feature for MLflow server.
#: This feature requires "huey" package dependency, and requires MLflow server to configure
#: --backend-store-uri to database URI.
#: (default: ``False``)
MLFLOW_SERVER_ENABLE_JOB_EXECUTION = _BooleanEnvironmentVariable(
"MLFLOW_SERVER_ENABLE_JOB_EXECUTION", False
)
#: Specifies MLflow server job maximum allowed retries for transient errors.
#: (default: ``3``)
MLFLOW_SERVER_JOB_TRANSIENT_ERROR_MAX_RETRIES = _EnvironmentVariable(
"MLFLOW_SERVER_JOB_TRANSIENT_ERROR_MAX_RETRIES", int, 3
)
#: Specifies MLflow server job retry base delay in seconds for transient errors.
#: The retry uses exponential backoff strategy, retry delay is computed by
#: `delay = min(base_delay * (2 ** (retry_count - 1)), max_delay)`
#: (default: ``15``)
MLFLOW_SERVER_JOB_TRANSIENT_ERROR_RETRY_BASE_DELAY = _EnvironmentVariable(
"MLFLOW_SERVER_JOB_TRANSIENT_ERROR_RETRY_BASE_DELAY", int, 15
)
#: Specifies MLflow server job retry maximum delay in seconds for transient errors.
#: The retry uses exponential backoff strategy, retry delay is computed by
#: `delay = min(base_delay * (2 ** (retry_count - 1)), max_delay)`
#: (default: ``60``)
MLFLOW_SERVER_JOB_TRANSIENT_ERROR_RETRY_MAX_DELAY = _EnvironmentVariable(
"MLFLOW_SERVER_JOB_TRANSIENT_ERROR_RETRY_MAX_DELAY", int, 60
)
#: Specifies the maximum number of completion iterations allowed when invoking
#: judge models. This prevents infinite loops in case of complex traces or
#: issues with the judge's reasoning.
#: (default: ``30``)
MLFLOW_JUDGE_MAX_ITERATIONS = _EnvironmentVariable("MLFLOW_JUDGE_MAX_ITERATIONS", int, 30)
#: Enable automatic run resumption for Serverless GPU Compute (SGC) jobs on Databricks.
#: When enabled, MLflow will check for the SERVERLESS_GPU_COMPUTE_ASSOCIATED_JOB_RUN_ID job
#: parameter and automatically resume MLflow runs associated with that Databricks job run ID.
#: (default: ``True``)
_MLFLOW_ENABLE_SGC_RUN_RESUMPTION_FOR_DATABRICKS_JOBS = _BooleanEnvironmentVariable(
"MLFLOW_ENABLE_SGC_RUN_RESUMPTION_FOR_DATABRICKS_JOBS", True
)
|
_BooleanEnvironmentVariable
|
python
|
run-llama__llama_index
|
llama-index-integrations/llms/llama-index-llms-deepinfra/llama_index/llms/deepinfra/types.py
|
{
"start": 71,
"end": 210
}
|
class ____(BaseModel):
name: str
"""The name of the function."""
arguments: str
"""The arguments of the function."""
|
Function
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/select/tutorial001.py
|
{
"start": 100,
"end": 1141
}
|
class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
secret_name: str
age: Optional[int] = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.commit()
def select_heroes():
with Session(engine) as session:
statement = select(Hero)
results = session.exec(statement)
for hero in results:
print(hero)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
|
Hero
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.