language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
PrefectHQ__prefect
|
src/prefect/server/schemas/actions.py
|
{
"start": 14163,
"end": 14837
}
|
class ____(ActionBaseModel):
"""Data used by the Prefect REST API to update a flow run."""
name: Optional[str] = Field(None)
flow_version: Optional[str] = Field(None)
parameters: Dict[str, Any] = Field(default_factory=dict)
empirical_policy: schemas.core.FlowRunPolicy = Field(
default_factory=schemas.core.FlowRunPolicy
)
tags: List[str] = Field(default_factory=list)
infrastructure_pid: Optional[str] = Field(None)
job_variables: Optional[Dict[str, Any]] = Field(None)
@field_validator("name", mode="before")
@classmethod
def set_name(cls, name: str) -> str:
return get_or_create_run_name(name)
|
FlowRunUpdate
|
python
|
encode__django-rest-framework
|
tests/test_viewsets.py
|
{
"start": 530,
"end": 658
}
|
class ____(GenericViewSet):
def list(self, request, *args, **kwargs):
return Response({'ACTION': 'LIST'})
|
BasicViewSet
|
python
|
apache__airflow
|
providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/kubernetes_helper_functions.py
|
{
"start": 2523,
"end": 7185
}
|
class ____(tenacity.wait.wait_base):
"""Wait strategy that honors Retry-After header on 429, else falls back to exponential backoff."""
def __call__(self, retry_state):
exc = retry_state.outcome.exception() if retry_state.outcome else None
if isinstance(exc, (SyncApiException, AsyncApiException)) and exc.status == 429:
retry_after = (exc.headers or {}).get("Retry-After")
if retry_after:
try:
return float(int(retry_after))
except ValueError:
pass
# Inline exponential fallback
return _default_wait(retry_state)
def generic_api_retry(func):
"""
Retry to Kubernetes API calls.
- Retries only transient ApiException status codes.
- Honors Retry-After on 429.
"""
return tenacity.retry(
stop=tenacity.stop_after_attempt(API_RETRIES),
wait=WaitRetryAfterOrExponential(),
retry=tenacity.retry_if_exception(_should_retry_api),
reraise=True,
before_sleep=tenacity.before_sleep_log(log, logging.WARNING),
)(func)
def rand_str(num):
"""
Generate random lowercase alphanumeric string of length num.
:meta private:
"""
return "".join(secrets.choice(alphanum_lower) for _ in range(num))
def add_unique_suffix(*, name: str, rand_len: int = 8, max_len: int = POD_NAME_MAX_LENGTH) -> str:
"""
Add random string to pod or job name while staying under max length.
:param name: name of the pod or job
:param rand_len: length of the random string to append
:param max_len: maximum length of the pod name
:meta private:
"""
suffix = "-" + rand_str(rand_len)
return name[: max_len - len(suffix)].strip("-.") + suffix
def create_unique_id(
dag_id: str | None = None,
task_id: str | None = None,
*,
max_length: int = POD_NAME_MAX_LENGTH,
unique: bool = True,
) -> str:
"""
Generate unique pod or job ID given a dag_id and / or task_id.
:param dag_id: DAG ID
:param task_id: Task ID
:param max_length: max number of characters
:param unique: whether a random string suffix should be added
:return: A valid identifier for a kubernetes pod name
"""
if not (dag_id or task_id):
raise ValueError("Must supply either dag_id or task_id.")
name = ""
if dag_id:
name += dag_id
if task_id:
if name:
name += "-"
name += task_id
base_name = slugify(name, lowercase=True)[:max_length].strip(".-")
if unique:
return add_unique_suffix(name=base_name, rand_len=8, max_len=max_length)
return base_name
def annotations_to_key(annotations: dict[str, str]) -> TaskInstanceKey:
"""Build a TaskInstanceKey based on pod annotations."""
log.debug("Creating task key for annotations %s", annotations)
dag_id = annotations["dag_id"]
task_id = annotations["task_id"]
try_number = int(annotations["try_number"])
annotation_run_id = annotations.get("run_id")
map_index = int(annotations.get("map_index", -1))
# Compat: Look up the run_id from the TI table!
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import TaskInstance, TaskInstanceKey
from airflow.settings import Session
logical_date_key = get_logical_date_key()
if not annotation_run_id and logical_date_key in annotations:
logical_date = pendulum.parse(annotations[logical_date_key])
# Do _not_ use create-session, we don't want to expunge
if Session is None:
raise RuntimeError("Session not configured. Call configure_orm() first.")
session = Session()
task_instance_run_id = (
session.query(TaskInstance.run_id)
.join(TaskInstance.dag_run)
.filter(
TaskInstance.dag_id == dag_id,
TaskInstance.task_id == task_id,
getattr(DagRun, logical_date_key) == logical_date,
)
.scalar()
)
else:
task_instance_run_id = annotation_run_id
return TaskInstanceKey(
dag_id=dag_id,
task_id=task_id,
run_id=task_instance_run_id,
try_number=try_number,
map_index=map_index,
)
@cache
def get_logs_task_metadata() -> bool:
return conf.getboolean("kubernetes_executor", "logs_task_metadata")
def annotations_for_logging_task_metadata(annotation_set):
if get_logs_task_metadata():
annotations_for_logging = annotation_set
else:
annotations_for_logging = "<omitted>"
return annotations_for_logging
|
WaitRetryAfterOrExponential
|
python
|
yaml__pyyaml
|
lib/yaml/dumper.py
|
{
"start": 1950,
"end": 2837
}
|
class ____(Emitter, Serializer, Representer, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=False,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None, sort_keys=True):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style, sort_keys=sort_keys)
Resolver.__init__(self)
|
Dumper
|
python
|
spack__spack
|
lib/spack/spack/util/compression.py
|
{
"start": 17864,
"end": 18046
}
|
class ____(FileTypeInterface):
OFFSET = 257
_MAGIC_NUMBER_GNU = b"ustar \0"
_MAGIC_NUMBER_POSIX = b"ustar\x0000"
extension = "tar"
name = "tar archive"
|
TarFileType
|
python
|
pandas-dev__pandas
|
pandas/tests/indexing/test_loc.py
|
{
"start": 109712,
"end": 122354
}
|
class ____:
@pytest.mark.parametrize("val,expected", [(2**63 - 1, 3), (2**63, 4)])
def test_loc_uint64(self, val, expected):
# see GH#19399
ser = Series({2**63 - 1: 3, 2**63: 4})
assert ser.loc[val] == expected
def test_loc_getitem(self, string_series, datetime_series):
inds = string_series.index[[3, 4, 7]]
tm.assert_series_equal(string_series.loc[inds], string_series.reindex(inds))
tm.assert_series_equal(string_series.iloc[5::2], string_series[5::2])
# slice with indices
d1, d2 = datetime_series.index[[5, 15]]
result = datetime_series.loc[d1:d2]
expected = datetime_series.truncate(d1, d2)
tm.assert_series_equal(result, expected)
# boolean
mask = string_series > string_series.median()
tm.assert_series_equal(string_series.loc[mask], string_series[mask])
# ask for index value
assert datetime_series.loc[d1] == datetime_series[d1]
assert datetime_series.loc[d2] == datetime_series[d2]
def test_loc_getitem_not_monotonic(self, datetime_series):
d1, d2 = datetime_series.index[[5, 15]]
ts2 = datetime_series[::2].iloc[[1, 2, 0]]
msg = r"Timestamp\('2000-01-10 00:00:00'\)"
with pytest.raises(KeyError, match=msg):
ts2.loc[d1:d2]
with pytest.raises(KeyError, match=msg):
ts2.loc[d1:d2] = 0
def test_loc_getitem_setitem_integer_slice_keyerrors(self):
ser = Series(
np.random.default_rng(2).standard_normal(10), index=list(range(0, 20, 2))
)
# this is OK
cp = ser.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).all()
# so is this
cp = ser.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = ser.iloc[2:6]
result2 = ser.loc[3:11]
expected = ser.reindex([4, 6, 8, 10])
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = ser.iloc[list(range(5)) + list(range(9, 4, -1))]
with pytest.raises(KeyError, match=r"^3$"):
s2.loc[3:11]
with pytest.raises(KeyError, match=r"^3$"):
s2.loc[3:11] = 0
def test_loc_getitem_iterator(self, string_series):
idx = iter(string_series.index[:10])
result = string_series.loc[idx]
tm.assert_series_equal(result, string_series[:10])
def test_loc_setitem_boolean(self, string_series):
mask = string_series > string_series.median()
result = string_series.copy()
result.loc[mask] = 0
expected = string_series
expected[mask] = 0
tm.assert_series_equal(result, expected)
def test_loc_setitem_corner(self, string_series):
inds = list(string_series.index[[5, 8, 12]])
string_series.loc[inds] = 5
msg = r"\['foo'\] not in index"
with pytest.raises(KeyError, match=msg):
string_series.loc[inds + ["foo"]] = 5
def test_basic_setitem_with_labels(self, datetime_series):
indices = datetime_series.index[[5, 10, 15]]
cp = datetime_series.copy()
exp = datetime_series.copy()
cp[indices] = 0
exp.loc[indices] = 0
tm.assert_series_equal(cp, exp)
cp = datetime_series.copy()
exp = datetime_series.copy()
cp[indices[0] : indices[2]] = 0
exp.loc[indices[0] : indices[2]] = 0
tm.assert_series_equal(cp, exp)
def test_loc_setitem_listlike_of_ints(self):
# integer indexes, be careful
ser = Series(
np.random.default_rng(2).standard_normal(10), index=list(range(0, 20, 2))
)
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = ser.copy()
exp = ser.copy()
ser[inds] = 0
ser.loc[inds] = 0
tm.assert_series_equal(cp, exp)
cp = ser.copy()
exp = ser.copy()
ser[arr_inds] = 0
ser.loc[arr_inds] = 0
tm.assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
msg = r"\[5\] not in index"
with pytest.raises(KeyError, match=msg):
ser[inds_notfound] = 0
with pytest.raises(Exception, match=msg):
ser[arr_inds_notfound] = 0
def test_loc_setitem_dt64tz_values(self):
# GH#12089
ser = Series(
date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=["a", "b", "c"],
)
s2 = ser.copy()
expected = Timestamp("2011-01-03", tz="US/Eastern")
s2.loc["a"] = expected
result = s2.loc["a"]
assert result == expected
s2 = ser.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
assert result == expected
s2 = ser.copy()
s2["a"] = expected
result = s2["a"]
assert result == expected
@pytest.mark.parametrize("array_fn", [np.array, pd.array, list, tuple])
@pytest.mark.parametrize("size", [0, 4, 5, 6])
def test_loc_iloc_setitem_with_listlike(self, size, array_fn):
# GH37748
# testing insertion, in a Series of size N (here 5), of a listlike object
# of size 0, N-1, N, N+1
arr = array_fn([0] * size)
expected = Series([arr, 0, 0, 0, 0], index=list("abcde"), dtype=object)
ser = Series(0, index=list("abcde"), dtype=object)
ser.loc["a"] = arr
tm.assert_series_equal(ser, expected)
ser = Series(0, index=list("abcde"), dtype=object)
ser.iloc[0] = arr
tm.assert_series_equal(ser, expected)
@pytest.mark.parametrize("indexer", [IndexSlice["A", :], ("A", slice(None))])
def test_loc_series_getitem_too_many_dimensions(self, indexer):
# GH#35349
ser = Series(
index=MultiIndex.from_tuples([("A", "0"), ("A", "1"), ("B", "0")]),
data=[21, 22, 23],
)
msg = "Too many indexers"
with pytest.raises(IndexingError, match=msg):
ser.loc[indexer, :]
with pytest.raises(IndexingError, match=msg):
ser.loc[indexer, :] = 1
def test_loc_setitem(self, string_series):
inds = string_series.index[[3, 4, 7]]
result = string_series.copy()
result.loc[inds] = 5
expected = string_series.copy()
expected.iloc[[3, 4, 7]] = 5
tm.assert_series_equal(result, expected)
result.iloc[5:10] = 10
expected[5:10] = 10
tm.assert_series_equal(result, expected)
# set slice with indices
d1, d2 = string_series.index[[5, 15]]
result.loc[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
tm.assert_series_equal(result, expected)
# set index value
string_series.loc[d1] = 4
string_series.loc[d2] = 6
assert string_series[d1] == 4
assert string_series[d2] == 6
@pytest.mark.parametrize("dtype", ["object", "string"])
def test_loc_assign_dict_to_row(self, dtype):
# GH41044
df = DataFrame({"A": ["abc", "def"], "B": ["ghi", "jkl"]}, dtype=dtype)
df.loc[0, :] = {"A": "newA", "B": "newB"}
expected = DataFrame({"A": ["newA", "def"], "B": ["newB", "jkl"]}, dtype=dtype)
tm.assert_frame_equal(df, expected)
def test_loc_setitem_dict_timedelta_multiple_set(self):
# GH 16309
result = DataFrame(columns=["time", "value"])
result.loc[1] = {"time": Timedelta(6, unit="s"), "value": "foo"}
result.loc[1] = {"time": Timedelta(6, unit="s"), "value": "foo"}
expected = DataFrame(
[[Timedelta(6, unit="s"), "foo"]], columns=["time", "value"], index=[1]
)
tm.assert_frame_equal(result, expected)
def test_loc_set_multiple_items_in_multiple_new_columns(self):
# GH 25594
df = DataFrame(index=[1, 2], columns=["a"])
df.loc[1, ["b", "c"]] = [6, 7]
expected = DataFrame(
{
"a": Series([np.nan, np.nan], dtype="object"),
"b": [6, np.nan],
"c": [7, np.nan],
},
index=[1, 2],
)
tm.assert_frame_equal(df, expected)
def test_getitem_loc_str_periodindex(self):
# GH#33964
msg = "Period with BDay freq is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
index = pd.period_range(start="2000", periods=20, freq="B")
series = Series(range(20), index=index)
assert series.loc["2000-01-14"] == 9
def test_loc_nonunique_masked_index(self):
# GH 57027
ids = list(range(11))
index = Index(ids * 1000, dtype="Int64")
df = DataFrame({"val": np.arange(len(index), dtype=np.intp)}, index=index)
result = df.loc[ids]
expected = DataFrame(
{"val": index.argsort(kind="stable").astype(np.intp)},
index=Index(np.array(ids).repeat(1000), dtype="Int64"),
)
tm.assert_frame_equal(result, expected)
def test_loc_index_alignment_for_series(self):
# GH #56024
df = DataFrame({"a": [1, 2], "b": [3, 4]})
other = Series([200, 999], index=[1, 0])
df.loc[:, "a"] = other
expected = DataFrame({"a": [999, 200], "b": [3, 4]})
tm.assert_frame_equal(expected, df)
@pytest.mark.parametrize("dtype", ["str", object])
def test_loc_reindexing_of_empty_index(self, dtype):
# GH 57735
df = DataFrame(index=[1, 1, 2, 2], data=["1", "1", "2", "2"], dtype=dtype)
df.loc[Series([False] * 4, index=df.index, name=0), 0] = df[0]
expected = DataFrame(index=[1, 1, 2, 2], data=["1", "1", "2", "2"], dtype=dtype)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"df, row_index, col_index, expected_df",
[
[
DataFrame([[1, 2, 3], [4, 5, 6]], columns=list("ABC")),
slice(0, 3),
["A", "B", "C"],
DataFrame([[10, 10, 10], [20, 20, 20]], columns=list("ABC")),
],
[
DataFrame([[1, 2, 3], [4, 5, 6]], columns=list("ABC")),
slice(None),
["A", "B", "C"],
DataFrame([[10, 10, 10], [20, 20, 20]], columns=list("ABC")),
],
[
DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=list("ABC")),
[True, True, True],
["A", "B", "C"],
DataFrame(
[[10, 10, 10], [20, 20, 20], [30, 30, 30]], columns=list("ABC")
),
],
[
DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=list("ABC")),
slice(0, 4),
["A", "B", "C"],
DataFrame(
[[10, 10, 10], [20, 20, 20], [30, 30, 30]], columns=list("ABC")
),
],
[
DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=list("ABC")),
slice(None),
slice("A", "C"),
DataFrame(
[[10, 10, 10], [20, 20, 20], [30, 30, 30]], columns=list("ABC")
),
],
[
DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=list("ABC")),
slice(None),
Series(
{
"A": True,
"C": False,
"B": True,
}
),
DataFrame([[10, 10, 3], [20, 20, 6], [30, 30, 9]], columns=list("ABC")),
],
],
)
def test_loc_set_series_to_multiple_columns(
self, df, row_index, col_index, expected_df
):
# GH 59933
df.loc[row_index, col_index] = Series([10, 20, 30])
tm.assert_frame_equal(df, expected_df)
def test_loc_setitem_matching_index(self):
# GH 25548
s = Series(0.0, index=list("abcd"))
s1 = Series(1.0, index=list("ab"))
s2 = Series(2.0, index=list("xy"))
# Test matching indices
s.loc[["a", "b"]] = s1
result = s[["a", "b"]]
expected = s1
tm.assert_series_equal(result, expected)
# Test unmatched indices
s.loc[["a", "b"]] = s2
result = s[["a", "b"]]
expected = Series([np.nan, np.nan], index=["a", "b"])
tm.assert_series_equal(result, expected)
|
TestLocSeries
|
python
|
plotly__plotly.py
|
plotly/graph_objs/layout/map/_center.py
|
{
"start": 235,
"end": 2800
}
|
class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.map"
_path_str = "layout.map.center"
_valid_props = {"lat", "lon"}
@property
def lat(self):
"""
Sets the latitude of the center of the map (in degrees North).
The 'lat' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["lat"]
@lat.setter
def lat(self, val):
self["lat"] = val
@property
def lon(self):
"""
Sets the longitude of the center of the map (in degrees East).
The 'lon' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["lon"]
@lon.setter
def lon(self, val):
self["lon"] = val
@property
def _prop_descriptions(self):
return """\
lat
Sets the latitude of the center of the map (in degrees
North).
lon
Sets the longitude of the center of the map (in degrees
East).
"""
def __init__(self, arg=None, lat=None, lon=None, **kwargs):
"""
Construct a new Center object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.map.Center`
lat
Sets the latitude of the center of the map (in degrees
North).
lon
Sets the longitude of the center of the map (in degrees
East).
Returns
-------
Center
"""
super().__init__("center")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.map.Center
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.map.Center`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("lat", arg, lat)
self._set_property("lon", arg, lon)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Center
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_internal/resolution/resolvelib/reporter.py
|
{
"start": 260,
"end": 2201
}
|
class ____(BaseReporter[Requirement, Candidate, str]):
def __init__(self) -> None:
self.reject_count_by_package: DefaultDict[str, int] = defaultdict(int)
self._messages_at_reject_count = {
1: (
"pip is looking at multiple versions of {package_name} to "
"determine which version is compatible with other "
"requirements. This could take a while."
),
8: (
"pip is still looking at multiple versions of {package_name} to "
"determine which version is compatible with other "
"requirements. This could take a while."
),
13: (
"This is taking longer than usual. You might need to provide "
"the dependency resolver with stricter constraints to reduce "
"runtime. See https://pip.pypa.io/warnings/backtracking for "
"guidance. If you want to abort this run, press Ctrl + C."
),
}
def rejecting_candidate(self, criterion: Any, candidate: Candidate) -> None:
self.reject_count_by_package[candidate.name] += 1
count = self.reject_count_by_package[candidate.name]
if count not in self._messages_at_reject_count:
return
message = self._messages_at_reject_count[count]
logger.info("INFO: %s", message.format(package_name=candidate.name))
msg = "Will try a different candidate, due to conflict:"
for req_info in criterion.information:
req, parent = req_info.requirement, req_info.parent
# Inspired by Factory.get_installation_error
msg += "\n "
if parent:
msg += f"{parent.name} {parent.version} depends on "
else:
msg += "The user requested "
msg += req.format_for_error()
logger.debug(msg)
|
PipReporter
|
python
|
apache__airflow
|
dev/breeze/src/airflow_breeze/commands/release_management_commands.py
|
{
"start": 10076,
"end": 106240
}
|
class ____(NamedTuple):
filepath: Path
package: str
version: Version
dist_type: Literal["sdist", "wheel"]
@classmethod
def from_sdist(cls, filepath: Path) -> DistributionPackageInfo:
from packaging.utils import parse_sdist_filename
package, version = parse_sdist_filename(filepath.name)
return cls(
filepath=filepath.resolve().absolute(), package=package, version=version, dist_type="sdist"
)
@classmethod
def from_wheel(cls, filepath: Path) -> DistributionPackageInfo:
from packaging.utils import parse_wheel_filename
package, version, *_ = parse_wheel_filename(filepath.name)
return cls(
filepath=filepath.resolve().absolute(), package=package, version=version, dist_type="wheel"
)
@classmethod
def dist_packages(
cls,
*,
distribution_format: str,
dist_directory: Path,
build_type: DistributionBuildType,
) -> tuple[DistributionPackageInfo, ...]:
if build_type == DistributionBuildType.AIRFLOW:
default_glob_patterns = ["apache_airflow-", "apache_airflow_core-"]
elif build_type == DistributionBuildType.TASK_SDK:
default_glob_patterns = ["apache_airflow_task_sdk"]
elif build_type == DistributionBuildType.AIRFLOW_CTL:
default_glob_patterns = ["apache_airflow_ctl"]
else:
default_glob_patterns = ["apache_airflow_providers"]
dists_info = []
if distribution_format in ["sdist", "both"]:
for default_glob_pattern in default_glob_patterns:
for file in dist_directory.glob(f"{default_glob_pattern}*.tar.gz"):
if not file.is_file() or "-source.tar.gz" in file.name:
continue
dists_info.append(cls.from_sdist(filepath=file))
if distribution_format in ["wheel", "both"]:
for default_glob_pattern in default_glob_patterns:
for file in dist_directory.glob(f"{default_glob_pattern}*.whl"):
if not file.is_file():
continue
dists_info.append(cls.from_wheel(filepath=file))
return tuple(sorted(dists_info, key=lambda di: (di.package, di.dist_type)))
def __str__(self):
return f"{self.package} ({self.version}): {self.dist_type} - {self.filepath.name}"
def _build_local_build_image():
# This is security feature.
#
# Building the image needed to build airflow package including .git directory
# In isolated environment, to not allow the in-docker code to override local code
# The image used to build airflow package is built from scratch and contains
# Full Airflow code including Airflow repository is added to the image, but locally build node_modules
# are not added to the context of that image
AIRFLOW_BUILD_DOCKERFILE_PATH.unlink(missing_ok=True)
AIRFLOW_BUILD_DOCKERFILE_PATH.write_text(AIRFLOW_BUILD_DOCKERFILE)
AIRFLOW_BUILD_DOCKERFILE_DOCKERIGNORE_PATH.unlink(missing_ok=True)
dockerignore_content = AIRFLOW_DOCKERIGNORE_PATH.read_text()
dockerignore_content = dockerignore_content + textwrap.dedent("""
# Include git in the build context - we need to get git version and prek configuration
# And clients python code to be included in the context
!.git
!.pre-commit-config.yaml
!clients/python
""")
AIRFLOW_BUILD_DOCKERFILE_DOCKERIGNORE_PATH.write_text(dockerignore_content)
run_command(
[
"docker",
"build",
".",
"-f",
"airflow-build-dockerfile",
"--load",
"--tag",
AIRFLOW_BUILD_IMAGE_TAG,
],
text=True,
check=True,
cwd=AIRFLOW_ROOT_PATH,
env={"DOCKER_CLI_HINTS": "false"},
)
def _build_airflow_packages_with_docker(
distribution_format: str, source_date_epoch: int, version_suffix: str
):
with apply_version_suffix_to_non_provider_pyproject_tomls(
version_suffix=version_suffix,
init_file_path=AIRFLOW_CORE_SOURCES_PATH / "airflow" / "__init__.py",
pyproject_toml_paths=[
AIRFLOW_ROOT_PATH / "pyproject.toml",
AIRFLOW_CORE_ROOT_PATH / "pyproject.toml",
],
) as pyproject_toml_paths:
debug_pyproject_tomls(pyproject_toml_paths)
_build_local_build_image()
container_id = f"airflow-build-{random.getrandbits(64):08x}"
result = run_command(
cmd=[
"docker",
"run",
"--name",
container_id,
"-t",
"-e",
f"SOURCE_DATE_EPOCH={source_date_epoch}",
"-e",
"HOME=/opt/airflow/files/home",
"-e",
"GITHUB_ACTIONS",
"-e",
f"DISTRIBUTION_FORMAT={distribution_format}",
"-w",
"/opt/airflow",
AIRFLOW_BUILD_IMAGE_TAG,
"python",
"/opt/airflow/scripts/in_container/run_prepare_airflow_distributions.py",
],
check=False,
)
if result.returncode != 0:
get_console().print("[error]Error preparing Airflow package[/]")
fix_ownership_using_docker()
sys.exit(result.returncode)
AIRFLOW_DIST_PATH.mkdir(parents=True, exist_ok=True)
# Copy all files in the dist directory in container to the host dist directories (note '/.' in SRC)
run_command(["docker", "cp", f"{container_id}:/opt/airflow/dist/.", "./dist"], check=True)
run_command(["docker", "cp", f"{container_id}:/opt/airflow/airflow-core/dist/.", "./dist"], check=True)
run_command(["docker", "rm", "--force", container_id], check=False, stderr=DEVNULL, stdout=DEVNULL)
def apply_distribution_format_to_hatch_command(build_command: list[str], distribution_format: str):
if distribution_format in ["sdist", "both"]:
build_command.extend(["-t", "sdist"])
if distribution_format in ["wheel", "both"]:
build_command.extend(["-t", "wheel"])
def _build_airflow_packages_with_hatch(distribution_format: str, source_date_epoch: int, version_suffix: str):
env_copy = os.environ.copy()
env_copy["SOURCE_DATE_EPOCH"] = str(source_date_epoch)
build_airflow_core_command = ["hatch", "build", "-c", "-t", "custom"]
apply_distribution_format_to_hatch_command(build_airflow_core_command, distribution_format)
get_console().print(f"[bright_blue]Building apache-airflow-core distributions: {distribution_format}\n")
with apply_version_suffix_to_non_provider_pyproject_tomls(
version_suffix=version_suffix,
init_file_path=AIRFLOW_CORE_SOURCES_PATH / "airflow" / "__init__.py",
pyproject_toml_paths=[AIRFLOW_CORE_ROOT_PATH / "pyproject.toml"],
) as pyproject_toml_paths:
debug_pyproject_tomls(pyproject_toml_paths)
run_command(
build_airflow_core_command,
check=True,
env=env_copy,
cwd=AIRFLOW_CORE_ROOT_PATH,
)
get_console().print(f"[bright_blue]Building apache-airflow distributions: {distribution_format}\n")
build_airflow_command = ["hatch", "build", "-c"]
apply_distribution_format_to_hatch_command(build_airflow_command, distribution_format)
with apply_version_suffix_to_non_provider_pyproject_tomls(
version_suffix=version_suffix,
init_file_path=AIRFLOW_CORE_SOURCES_PATH / "airflow" / "__init__.py",
pyproject_toml_paths=[AIRFLOW_ROOT_PATH / "pyproject.toml"],
) as pyproject_toml_paths:
debug_pyproject_tomls(pyproject_toml_paths)
run_command(
build_airflow_command,
check=True,
env=env_copy,
cwd=AIRFLOW_ROOT_PATH,
)
for distribution_path in (AIRFLOW_CORE_ROOT_PATH / "dist").glob("apache_airflow_core*"):
shutil.move(distribution_path, AIRFLOW_DIST_PATH)
def _check_sdist_to_wheel_dists(dists_info: tuple[DistributionPackageInfo, ...]):
venv_created = False
success_build = True
with tempfile.TemporaryDirectory() as tmp_dir_name:
for di in dists_info:
if di.dist_type != "sdist":
continue
if not venv_created:
python_path = create_venv(
Path(tmp_dir_name) / ".venv",
pip_version=AIRFLOW_PIP_VERSION,
uv_version=AIRFLOW_UV_VERSION,
)
venv_created = True
returncode = _check_sdist_to_wheel(python_path, di, str(tmp_dir_name))
if returncode != 0:
success_build = False
if not success_build:
get_console().print(
"\n[errors]Errors detected during build wheel distribution(s) from sdist. Exiting!\n"
)
sys.exit(1)
def _check_sdist_to_wheel(python_path: Path, dist_info: DistributionPackageInfo, cwd: str) -> int:
get_console().print(
f"[info]Validate build wheel from sdist distribution for package {dist_info.package!r}.[/]"
)
result_build_wheel = run_command(
[
"uv",
"build",
"--wheel",
"--out-dir",
cwd,
dist_info.filepath.as_posix(),
],
check=False,
# We should run `pip wheel` outside of Project directory for avoid the case
# when some files presented into the project directory, but not included in sdist.
cwd=cwd,
capture_output=True,
text=True,
)
if (returncode := result_build_wheel.returncode) == 0:
get_console().print(
f"[success]Successfully build wheel from sdist distribution for package {dist_info.package!r}.[/]"
)
else:
get_console().print(
f"[error]Unable to build wheel from sdist distribution for package {dist_info.package!r}.[/]\n"
f"{result_build_wheel.stdout}\n{result_build_wheel.stderr}"
)
return returncode
@release_management_group.command(
name="prepare-airflow-distributions",
help="Prepare sdist/whl package of Airflow.",
)
@option_distribution_format
@option_version_suffix
@option_use_local_hatch
@option_verbose
@option_dry_run
def prepare_airflow_distributions(
distribution_format: str,
version_suffix: str,
use_local_hatch: bool,
):
perform_environment_checks()
fix_ownership_using_docker()
cleanup_python_generated_files()
source_date_epoch = get_source_date_epoch(AIRFLOW_ROOT_PATH)
if use_local_hatch:
_build_airflow_packages_with_hatch(
distribution_format=distribution_format,
source_date_epoch=source_date_epoch,
version_suffix=version_suffix,
)
get_console().print("[info]Checking if sdist packages can be built into wheels[/]")
packages = DistributionPackageInfo.dist_packages(
distribution_format=distribution_format,
dist_directory=AIRFLOW_DIST_PATH,
build_type=DistributionBuildType.AIRFLOW,
)
get_console().print()
_check_sdist_to_wheel_dists(packages)
get_console().print("\n[info]Packages available in dist:[/]\n")
for dist_info in packages:
get_console().print(str(dist_info))
get_console().print()
else:
_build_airflow_packages_with_docker(
distribution_format=distribution_format,
source_date_epoch=source_date_epoch,
version_suffix=version_suffix,
)
get_console().print("[success]Successfully prepared Airflow packages")
def _prepare_non_core_distributions(
distribution_format: str,
version_suffix: str,
use_local_hatch: bool,
root_path: Path,
init_file_path: Path,
distribution_path: Path,
distribution_name: str,
distribution_pretty_name: str,
full_distribution_pretty_name: str | None = None,
):
if full_distribution_pretty_name is not None:
distribution_pretty_name = full_distribution_pretty_name
perform_environment_checks()
fix_ownership_using_docker()
cleanup_python_generated_files()
source_date_epoch = get_source_date_epoch(AIRFLOW_ROOT_PATH)
def _build_package_with_hatch(build_distribution_format: str):
command = [
"hatch",
"build",
"-c",
]
if build_distribution_format == "sdist" or build_distribution_format == "both":
command += ["-t", "sdist"]
if build_distribution_format == "wheel" or build_distribution_format == "both":
command += ["-t", "wheel"]
env_copy = os.environ.copy()
env_copy["SOURCE_DATE_EPOCH"] = str(source_date_epoch)
run_command(
cmd=command,
cwd=root_path,
env=env_copy,
check=True,
)
shutil.copytree(distribution_path, AIRFLOW_DIST_PATH, dirs_exist_ok=True)
def _build_package_with_docker(build_distribution_format: str):
_build_local_build_image()
command = "hatch build -c "
if build_distribution_format == "sdist" or build_distribution_format == "both":
command += "-t sdist "
if build_distribution_format == "wheel" or build_distribution_format == "both":
command += "-t wheel "
container_id = f"airflow-{distribution_name}-build-{random.getrandbits(64):08x}"
result = run_command(
cmd=[
"docker",
"run",
"--name",
container_id,
"-t",
"-e",
f"SOURCE_DATE_EPOCH={source_date_epoch}",
"-e",
"HOME=/opt/airflow/files/home",
"-e",
"GITHUB_ACTIONS",
"-w",
f"/opt/airflow/{distribution_name}",
AIRFLOW_BUILD_IMAGE_TAG,
"bash",
"-c",
command,
],
check=False,
)
if result.returncode != 0:
get_console().print(f"[error]Error preparing Airflow {distribution_pretty_name}[/]")
fix_ownership_using_docker()
sys.exit(result.returncode)
AIRFLOW_DIST_PATH.mkdir(parents=True, exist_ok=True)
get_console().print()
# Copy all files in the dist directory in container to the host dist directory (note '/.' in SRC)
run_command(
["docker", "cp", f"{container_id}:/opt/airflow/{distribution_name}/dist/.", "./dist"], check=True
)
run_command(["docker", "rm", "--force", container_id], check=False, stdout=DEVNULL, stderr=DEVNULL)
if use_local_hatch:
with apply_version_suffix_to_non_provider_pyproject_tomls(
version_suffix=version_suffix,
init_file_path=init_file_path,
pyproject_toml_paths=[TASK_SDK_ROOT_PATH / "pyproject.toml"],
) as pyproject_toml_paths:
debug_pyproject_tomls(pyproject_toml_paths)
_build_package_with_hatch(
build_distribution_format=distribution_format,
)
get_console().print("[info]Checking if sdist packages can be built into wheels[/]")
packages = DistributionPackageInfo.dist_packages(
distribution_format=distribution_format,
dist_directory=distribution_path,
build_type=DistributionBuildType(distribution_name),
)
get_console().print()
_check_sdist_to_wheel_dists(packages)
get_console().print("\n[info]Packages available in dist:[/]\n")
for dist_info in packages:
get_console().print(str(dist_info))
get_console().print()
else:
with apply_version_suffix_to_non_provider_pyproject_tomls(
version_suffix=version_suffix,
init_file_path=init_file_path,
pyproject_toml_paths=[root_path / "pyproject.toml"],
) as pyproject_toml_paths:
debug_pyproject_tomls(pyproject_toml_paths)
_build_package_with_docker(
build_distribution_format=distribution_format,
)
get_console().print(
f"[success]Successfully prepared {f'Airflow {distribution_pretty_name}' if not full_distribution_pretty_name else full_distribution_pretty_name} packages"
)
@release_management_group.command(
    name="prepare-task-sdk-distributions",
    help="Prepare sdist/whl distributions of Airflow Task SDK.",
)
@option_distribution_format
@option_version_suffix
@option_use_local_hatch
@option_verbose
@option_dry_run
def prepare_task_sdk_distributions(
    distribution_format: str,
    version_suffix: str,
    use_local_hatch: bool,
):
    """Build sdist/wheel distributions for the Airflow Task SDK."""
    # Distribution-specific settings for the Task SDK; the generic preparation
    # routine does the actual building.
    task_sdk_settings = {
        "root_path": TASK_SDK_ROOT_PATH,
        "init_file_path": TASK_SDK_SOURCES_PATH / "airflow" / "sdk" / "__init__.py",
        "distribution_path": TASK_SDK_DIST_PATH,
        "distribution_name": "task-sdk",
        "distribution_pretty_name": "Task SDK",
    }
    _prepare_non_core_distributions(
        distribution_format=distribution_format,
        version_suffix=version_suffix,
        use_local_hatch=use_local_hatch,
        **task_sdk_settings,
    )
@release_management_group.command(
    name="prepare-airflow-ctl-distributions",
    help="Prepare sdist/whl distributions of airflowctl.",
)
@option_distribution_format
@option_version_suffix
@option_use_local_hatch
@option_verbose
@option_dry_run
def prepare_airflow_ctl_distributions(
    distribution_format: str,
    version_suffix: str,
    use_local_hatch: bool,
):
    """Build sdist/wheel distributions for airflowctl."""
    # Distribution-specific settings for airflowctl; the generic preparation
    # routine does the actual building.
    airflow_ctl_settings = {
        "root_path": AIRFLOW_CTL_ROOT_PATH,
        "init_file_path": AIRFLOW_CTL_SOURCES_PATH / "airflowctl" / "__init__.py",
        "distribution_path": AIRFLOW_CTL_DIST_PATH,
        "distribution_name": "airflow-ctl",
        "distribution_pretty_name": "",
        "full_distribution_pretty_name": "airflowctl",
    }
    _prepare_non_core_distributions(
        distribution_format=distribution_format,
        version_suffix=version_suffix,
        use_local_hatch=use_local_hatch,
        **airflow_ctl_settings,
    )
def provider_action_summary(description: str, message_type: MessageType, packages: list[str]):
    """Print a one-line summary (count plus package list) for one release-action outcome bucket."""
    if not packages:
        # Nothing to report for this bucket - stay silent.
        return
    console = get_console()
    console.print(f"{description}: {len(packages)}\n")
    console.print(f"[{message_type.value}]{' '.join(packages)}")
    console.print()
@release_management_group.command(
    name="prepare-provider-documentation",
    help="Prepare CHANGELOG, README and COMMITS information for providers.",
)
@click.option(
    "--skip-git-fetch",
    is_flag=True,
    help="Skips removal and recreation of `apache-https-for-providers` remote in git. By default, the "
    "remote is recreated and fetched to make sure that it's up to date and that recent commits "
    "are not missing",
)
@click.option(
    "--base-branch",
    type=str,
    default="main",
    help="Base branch to use as diff for documentation generation (used for releasing from old branch)",
)
@option_github_repository
@option_include_not_ready_providers
@option_include_removed_providers
@click.option(
    "--non-interactive",
    is_flag=True,
    help="Run in non-interactive mode. Provides random answers to the type of changes and confirms release"
    "for providers prepared for release - useful to test the script in non-interactive mode in CI.",
)
@click.option(
    "--only-min-version-update",
    is_flag=True,
    help="Only update minimum version in __init__.py files and regenerate corresponding documentation",
)
@click.option(
    "--reapply-templates-only",
    is_flag=True,
    help="Only reapply templates, do not bump version. Useful if templates were added"
    " and you need to regenerate documentation.",
)
@click.option(
    "--skip-changelog",
    is_flag=True,
    help="Skip changelog generation. This is used in prek that updates build-files only.",
)
@click.option(
    "--incremental-update",
    is_flag=True,
    help="Runs incremental update only after rebase of earlier branch to check if there are no changes.",
)
@click.option(
    "--skip-readme",
    is_flag=True,
    help="Skip readme generation. This is used in prek that updates build-files only.",
)
@click.option(
    "--release-date",
    type=str,
    callback=validate_release_date,
    envvar="RELEASE_DATE",
    help="Planned release date for the providers release in format "
    "YYYY-MM-DD[_NN] (e.g., 2025-11-16 or 2025-11-16_01).",
)
@argument_provider_distributions
@option_verbose
@option_answer
@option_dry_run
def prepare_provider_documentation(
    base_branch: str,
    github_repository: str,
    include_not_ready_providers: bool,
    include_removed_providers: bool,
    non_interactive: bool,
    only_min_version_update: bool,
    provider_distributions: tuple[str],
    reapply_templates_only: bool,
    skip_git_fetch: bool,
    skip_changelog: bool,
    skip_readme: bool,
    incremental_update: bool,
    release_date: str | None,
):
    """
    Prepare release documentation (release notes, min-version bumps, changelog) for providers.

    Iterates over the selected provider distributions; for each one it updates release notes,
    min Airflow version / build files and (optionally) the changelog. Each provider ends up
    in exactly one outcome bucket and a summary is printed at the end. Exits non-zero when
    any provider errored, and zero (early) when nothing was prepared at all.
    """
    # Imported lazily: this module is comparatively heavy and only needed by this command.
    from airflow_breeze.prepare_providers.provider_documentation import (
        PrepareReleaseDocsChangesOnlyException,
        PrepareReleaseDocsErrorOccurredException,
        PrepareReleaseDocsNoChangesException,
        PrepareReleaseDocsUserQuitException,
        PrepareReleaseDocsUserSkippedException,
        update_changelog,
        update_min_airflow_version_and_build_files,
        update_release_notes,
    )

    if not release_date and not only_min_version_update:
        get_console().print("[error]Release date is required unless --only-min-version-update is used![/]")
        sys.exit(1)
    perform_environment_checks()
    fix_ownership_using_docker()
    cleanup_python_generated_files()
    if incremental_update:
        # Incremental runs are unattended - force "yes" for all confirmation prompts.
        set_forced_answer("yes")
    if not provider_distributions:
        # No explicit selection: process every available provider distribution.
        provider_distributions = get_available_distributions(
            include_removed=include_removed_providers, include_not_ready=include_not_ready_providers
        )

    if not skip_git_fetch:
        # Recreate the remote from scratch so the subsequent fetch is guaranteed fresh.
        run_command(["git", "remote", "rm", "apache-https-for-providers"], check=False, stderr=DEVNULL)
        make_sure_remote_apache_exists_and_fetch(github_repository=github_repository)
    # Outcome buckets - each provider id lands in exactly one of these.
    no_changes_packages = []
    doc_only_packages = []
    error_packages = []
    user_skipped_packages = []
    success_packages = []
    suspended_packages = []
    removed_packages = []
    for provider_id in provider_distributions:
        provider_metadata = basic_provider_checks(provider_id)
        if os.environ.get("GITHUB_ACTIONS", "false") != "true":
            if not only_min_version_update:
                get_console().print("-" * get_console().width)
        try:
            with_breaking_changes = False
            maybe_with_new_features = False
            with ci_group(
                f"Update release notes for package '{provider_id}' ",
                skip_printing_title=only_min_version_update,
            ):
                if not only_min_version_update and not reapply_templates_only:
                    get_console().print("Updating documentation for the latest release version.")
                with_breaking_changes, maybe_with_new_features, with_min_airflow_version_bump = (
                    update_release_notes(
                        provider_id,
                        reapply_templates_only=reapply_templates_only,
                        base_branch=base_branch,
                        regenerate_missing_docs=reapply_templates_only,
                        non_interactive=non_interactive,
                        only_min_version_update=only_min_version_update,
                    )
                )
                update_min_airflow_version_and_build_files(
                    provider_id=provider_id,
                    with_breaking_changes=with_breaking_changes,
                    maybe_with_new_features=maybe_with_new_features,
                    skip_readme=skip_readme,
                )
            if not only_min_version_update and not reapply_templates_only and not skip_changelog:
                with ci_group(
                    f"Updates changelog for last release of package '{provider_id}'",
                    skip_printing_title=only_min_version_update,
                ):
                    update_changelog(
                        package_id=provider_id,
                        base_branch=base_branch,
                        reapply_templates_only=reapply_templates_only,
                        with_breaking_changes=with_breaking_changes,
                        maybe_with_new_features=maybe_with_new_features,
                        only_min_version_update=only_min_version_update,
                        with_min_airflow_version_bump=with_min_airflow_version_bump,
                    )
        # NOTE: exceptions are used as control flow here - each maps to a summary bucket,
        # and "user quit" aborts the whole loop.
        except PrepareReleaseDocsNoChangesException:
            no_changes_packages.append(provider_id)
        except PrepareReleaseDocsChangesOnlyException:
            doc_only_packages.append(provider_id)
        except PrepareReleaseDocsErrorOccurredException:
            error_packages.append(provider_id)
        except PrepareReleaseDocsUserSkippedException:
            user_skipped_packages.append(provider_id)
        except PackageSuspendedException:
            suspended_packages.append(provider_id)
        except PrepareReleaseDocsUserQuitException:
            break
        else:
            if provider_metadata["state"] == "removed":
                removed_packages.append(provider_id)
            else:
                success_packages.append(provider_id)
    get_console().print()
    get_console().print("\n[info]Summary of prepared documentation:\n")
    provider_action_summary(
        "Success" if not only_min_version_update else "Min Version Bumped",
        MessageType.SUCCESS,
        success_packages,
    )
    provider_action_summary("Scheduled for removal", MessageType.SUCCESS, removed_packages)
    provider_action_summary("Docs only", MessageType.SUCCESS, doc_only_packages)
    provider_action_summary(
        "Skipped on no changes" if not only_min_version_update else "Min Version Not Bumped",
        MessageType.WARNING,
        no_changes_packages,
    )
    provider_action_summary("Suspended", MessageType.WARNING, suspended_packages)
    provider_action_summary("Skipped by user", MessageType.SPECIAL, user_skipped_packages)
    provider_action_summary("Errors", MessageType.ERROR, error_packages)
    if error_packages:
        get_console().print("\n[errors]There were errors when generating packages. Exiting!\n")
        sys.exit(1)
    if not success_packages and not doc_only_packages and not removed_packages:
        get_console().print("\n[warning]No packages prepared!\n")
        sys.exit(0)
    get_console().print("\n[success]Successfully prepared documentation for packages!\n\n")
    get_console().print(
        "\n[info]Please review the updated files, classify the changelog entries and commit the changes.\n"
    )
    if release_date:
        # Record the planned release date so later steps can reference it.
        AIRFLOW_PROVIDERS_LAST_RELEASE_DATE_PATH.write_text(release_date + "\n")
    if incremental_update:
        get_console().print(r"\[warning] Generated changes:")
        run_command(["git", "diff"])
        get_console().print("\n")
        get_console().print("[warning]Important")
        get_console().print(
            " * Please review manually the changes in changelogs above and move the new changelog "
            "entries to the right sections."
        )
        get_console().print(
            "* Remove the `Please review ...` comments from the changelogs after moving changeslogs"
        )
        get_console().print(
            "* Update both changelog.rst AND provider.yaml in case the new changes require "
            "different classification of the upgrade (patchlevel/minor/major)"
        )
def basic_provider_checks(provider_id: str) -> dict[str, Any]:
    """
    Perform basic sanity checks for a provider before building or documenting it.

    :param provider_id: short id of the provider (e.g. ``apache.hive``)
    :return: metadata dictionary for the provider
    :raises SystemExit: when the provider id is not known
    :raises PackageSuspendedException: when the provider is suspended
    """
    provider_distributions_metadata = get_provider_distributions_metadata()
    get_console().print(f"\n[info]Reading provider package metadata: {provider_id}[/]")
    provider_metadata = provider_distributions_metadata.get(provider_id)
    if not provider_metadata:
        get_console().print(
            f"[error]The package {provider_id} could not be found in the list "
            f"of provider distributions. Exiting[/]"
        )
        get_console().print("Available provider distributions:")
        get_console().print(provider_distributions_metadata)
        sys.exit(1)
    # Use .get() consistently for "state": metadata may not define the key at all, and the
    # original `provider_metadata["state"]` would raise KeyError in that case.
    state = provider_metadata.get("state")
    if state == "removed":
        get_console().print(
            f"[warning]The package: {provider_id} is scheduled for removal, but "
            f"since you asked for it, it will be built [/]\n"
        )
    elif state == "suspended":
        get_console().print(f"[warning]The package: {provider_id} is suspended skipping it [/]\n")
        raise PackageSuspendedException()
    return provider_metadata
def _build_provider_distributions(
    provider_id: str,
    package_version_suffix: str,
    distribution_format: str,
    skip_tag_check: bool,
    skip_deleting_generated_files: bool,
) -> bool:
    """
    Builds provider distribution.

    :param provider_id: id of the provider package
    :param package_version_suffix: suffix to append to the package version
    :param distribution_format: format of the distribution to build (wheel or sdist)
    :param skip_tag_check: whether to skip tag check
    :param skip_deleting_generated_files: whether to skip deleting generated files
    :return: True if package was built, False if it was skipped.
    """
    if not skip_tag_check:
        # The tag check may also rewrite the suffix (e.g. bumping an rc number).
        should_skip, package_version_suffix = should_skip_the_package(provider_id, package_version_suffix)
        if should_skip:
            return False
    get_console().print()
    with ci_group(f"Preparing provider package [special]{provider_id}"):
        get_console().print()
        get_console().print(
            f"[info]Provider {provider_id} building in-place with suffix: '{package_version_suffix}'."
        )
        # The context manager temporarily rewrites pyproject.toml with the version suffix
        # applied and restores it on exit - all build steps must happen inside it.
        with apply_version_suffix_to_provider_pyproject_toml(
            provider_id, package_version_suffix
        ) as pyproject_toml_file:
            provider_root_dir = pyproject_toml_file.parent
            # Remove leftovers of previous builds before producing new artifacts.
            cleanup_build_remnants(provider_root_dir)
            build_provider_distribution(
                provider_id=provider_id,
                distribution_format=distribution_format,
                target_provider_root_sources_path=provider_root_dir,
            )
            move_built_distributions_and_cleanup(
                provider_root_dir,
                AIRFLOW_DIST_PATH,
                skip_cleanup=skip_deleting_generated_files,
                delete_only_build_and_dist_folders=True,
            )
    return True
@release_management_group.command(
    name="prepare-provider-distributions",
    help="Prepare sdist/whl distributions of Airflow Providers.",
)
@option_distribution_format
@option_version_suffix
@click.option(
    "--distributions-list-file",
    type=click.File("rt"),
    help="Read list of packages from text file (one package per line).",
)
@click.option(
    "--skip-tag-check",
    default=False,
    is_flag=True,
    help="Skip checking if the tag already exists in the remote repository",
)
@click.option(
    "--skip-deleting-generated-files",
    default=False,
    is_flag=True,
    help="Skip deleting files that were used to generate provider package. Useful for debugging and "
    "developing changes to the build process.",
)
@click.option(
    "--clean-dist",
    default=False,
    is_flag=True,
    help="Clean dist directory before building packages. Useful when you want to build multiple packages "
    " in a clean environment",
)
@click.option(
    "--distributions-list",
    envvar="DISTRIBUTIONS_LIST",
    type=str,
    help="Optional, contains space separated list of package ids that are processed for documentation "
    "building, and document publishing. It is an easier alternative to adding individual packages as"
    " arguments to every command. This overrides the packages passed as arguments.",
)
@option_dry_run
@option_github_repository
@option_include_not_ready_providers
@option_include_removed_providers
@argument_provider_distributions
@option_verbose
def prepare_provider_distributions(
    clean_dist: bool,
    distributions_list: str,
    github_repository: str,
    include_not_ready_providers: bool,
    include_removed_providers: bool,
    distribution_format: str,
    distributions_list_file: IO | None,
    provider_distributions: tuple[str, ...],
    skip_deleting_generated_files: bool,
    skip_tag_check: bool,
    version_suffix: str,
):
    """
    Build sdist/wheel distributions for the selected provider packages.

    The package selection can come from positional arguments, from a file
    (``--distributions-list-file``), or from ``--distributions-list`` / the
    DISTRIBUTIONS_LIST env variable (the latter overrides the positional arguments).
    Each package ends up in one outcome bucket; a summary is printed at the end and the
    command exits non-zero on build errors.
    """
    perform_environment_checks()
    fix_ownership_using_docker()
    cleanup_python_generated_files()
    distributions_list_as_tuple: tuple[str, ...] = ()
    if distributions_list and len(distributions_list):
        get_console().print(
            f"\n[info]Populating provider list from DISTRIBUTIONS_LIST env as {distributions_list}"
        )
        # Override provider_distributions with values from DISTRIBUTIONS_LIST
        distributions_list_as_tuple = tuple(distributions_list.split(" "))
    if provider_distributions and distributions_list_as_tuple:
        get_console().print(
            f"[warning]Both package arguments and --distributions-list / DISTRIBUTIONS_LIST passed. "
            f"Overriding to {distributions_list_as_tuple}"
        )
    provider_distributions = distributions_list_as_tuple or provider_distributions
    packages_list = get_packages_list_to_act_on(
        distributions_list_file=distributions_list_file,
        provider_distributions=provider_distributions,
        include_removed=include_removed_providers,
        include_not_ready=include_not_ready_providers,
    )
    if not skip_tag_check and not is_local_package_version(version_suffix):
        # Refresh the apache remote so the tag check below sees up-to-date tags.
        run_command(["git", "remote", "rm", "apache-https-for-providers"], check=False, stderr=DEVNULL)
        make_sure_remote_apache_exists_and_fetch(github_repository=github_repository)
    # Outcome buckets - each package id lands in exactly one of these.
    success_packages = []
    skipped_as_already_released_packages = []
    suspended_packages = []
    wrong_setup_packages = []
    error_packages = []
    if clean_dist:
        get_console().print("\n[warning]Cleaning dist directory before building packages[/]\n")
        shutil.rmtree(AIRFLOW_DIST_PATH, ignore_errors=True)
        AIRFLOW_DIST_PATH.mkdir(parents=True, exist_ok=True)
    for provider_id in packages_list:
        try:
            basic_provider_checks(provider_id)
            created = _build_provider_distributions(
                provider_id,
                version_suffix,
                distribution_format,
                skip_tag_check,
                skip_deleting_generated_files,
            )
        except PrepareReleasePackageTagExistException:
            skipped_as_already_released_packages.append(provider_id)
        except PrepareReleasePackageWrongSetupException:
            wrong_setup_packages.append(provider_id)
        except PrepareReleasePackageErrorBuildingPackageException:
            error_packages.append(provider_id)
        except PackageSuspendedException:
            suspended_packages.append(provider_id)
        else:
            get_console().print(f"\n[success]Generated package [special]{provider_id}")
            # created is False when the tag check decided the package should be skipped.
            if created:
                success_packages.append(provider_id)
            else:
                skipped_as_already_released_packages.append(provider_id)
    get_console().print()
    get_console().print("\n[info]Summary of prepared packages:\n")
    provider_action_summary("Success", MessageType.SUCCESS, success_packages)
    provider_action_summary(
        "Skipped as already released", MessageType.INFO, skipped_as_already_released_packages
    )
    provider_action_summary("Suspended", MessageType.WARNING, suspended_packages)
    provider_action_summary("Wrong setup generated", MessageType.ERROR, wrong_setup_packages)
    provider_action_summary("Errors", MessageType.ERROR, error_packages)
    if error_packages or wrong_setup_packages:
        get_console().print("\n[errors]There were errors when generating packages. Exiting!\n")
        sys.exit(1)
    if not success_packages and not skipped_as_already_released_packages:
        get_console().print("\n[warning]No packages prepared!\n")
        sys.exit(0)
    get_console().print("\n[success]Successfully built packages!\n\n")
    # Sanity check: verify that the sdists we just built can actually be turned into wheels.
    packages = DistributionPackageInfo.dist_packages(
        distribution_format=distribution_format,
        dist_directory=AIRFLOW_DIST_PATH,
        build_type=DistributionBuildType.PROVIDERS,
    )
    get_console().print()
    _check_sdist_to_wheel_dists(packages)
    get_console().print("\n[info]Packages available in dist:\n")
    for dist_info in packages:
        get_console().print(str(dist_info))
    get_console().print()
def run_generate_constraints(
    shell_params: ShellParams,
    output: Output | None,
) -> tuple[int, str]:
    """Run constraint generation inside the CI container and return (exit code, description)."""
    compose_project = f"constraints-{shell_params.python.replace('.', '-')}"
    shell_result = execute_command_in_shell(
        shell_params,
        project_name=compose_project,
        command="/opt/airflow/scripts/in_container/run_generate_constraints.py",
        output=output,
    )
    description = f"Constraints {shell_params.airflow_constraints_mode}:{shell_params.python}"
    return shell_result.returncode, description
# Regexp matching pip/uv output lines that indicate progress while generating constraints;
# used by the parallel runner to show activity in the per-process progress display.
CONSTRAINT_PROGRESS_MATCHER = (
    r"Found|Uninstalling|uninstalled|Collecting|Downloading|eta|Running|Installing|built|Attempting"
)
def list_generated_constraints(output: Output | None):
    """Print every file generated under the local ./files folder."""
    console = get_console(output=output)
    console.print("\n[info]List of generated files in './files' folder:[/]\n")
    for path in sorted(Path("./files").rglob("*")):
        if path.is_file():
            console.print(path.as_posix())
    console.print()
def run_generate_constraints_in_parallel(
    shell_params_list: list[ShellParams],
    python_version_list: list[str],
    include_success_outputs: bool,
    parallelism: int,
    skip_cleanup: bool,
    debug_resources: bool,
):
    """Run generate constraints in parallel"""
    with ci_group(f"Constraints for {python_version_list}"):
        # Human-readable labels shown next to each parallel job's progress.
        all_params = [
            f"Constraints {shell_params.airflow_constraints_mode}:{shell_params.python}"
            for shell_params in shell_params_list
        ]
        with run_with_pool(
            parallelism=parallelism,
            all_params=all_params,
            debug_resources=debug_resources,
            progress_matcher=GenericRegexpProgressMatcher(
                regexp=CONSTRAINT_PROGRESS_MATCHER, lines_to_search=6
            ),
        ) as (pool, outputs):
            # One async job per python version, each writing to its own output buffer.
            results = [
                pool.apply_async(
                    run_generate_constraints,
                    kwds={
                        "shell_params": shell_params,
                        "output": outputs[index],
                    },
                )
                for index, shell_params in enumerate(shell_params_list)
            ]
            # Waits for all jobs, prints summaries and raises on any failure.
            check_async_run_results(
                results=results,
                success_message="All constraints are generated.",
                outputs=outputs,
                include_success_outputs=include_success_outputs,
                skip_cleanup=skip_cleanup,
                summarize_on_ci=SummarizeAfter.SUCCESS,
                summary_start_regexp=".*Constraints generated in.*",
            )
@release_management_group.command(
    name="tag-providers",
    help="Generates tags for airflow provider releases.",
)
@click.option(
    "--clean-tags/--no-clean-tags",
    default=True,
    is_flag=True,
    envvar="CLEAN_TAGS",
    help="Delete tags (both local and remote) that are created due to github connectivity "
    "issues to avoid errors. The default behaviour would be to clean both local and remote tags.",
    show_default=True,
)
@click.option(
    "--release-date",
    type=str,
    help="Date of the release in YYYY-MM-DD format.",
    required=True,
    envvar="RELEASE_DATE",
)
@option_dry_run
@option_verbose
def tag_providers(
    clean_tags: bool,
    release_date: str,
):
    """
    Create and push one git tag per provider wheel found in dist/.

    The tag name is derived from the wheel file name as ``providers-<id>/<version>``.
    Tags are pushed to whichever remote ("origin" or "apache") points at apache/airflow.
    """
    # Find the remote whose push URL targets the canonical apache/airflow repository.
    found_remote = None
    remotes = ["origin", "apache"]
    for remote in remotes:
        try:
            result = run_command(
                ["git", "remote", "get-url", "--push", remote],
                stdout=subprocess.PIPE,
                stderr=subprocess.DEVNULL,
                text=True,
            )
            if "apache/airflow.git" in result.stdout:
                found_remote = remote
                break
        except subprocess.CalledProcessError:
            pass

    if found_remote is None:
        raise ValueError("Could not find the remote configured to push to apache/airflow")

    extra_flags = []
    tags = []
    if clean_tags:
        # --force lets re-runs overwrite tags left over from failed/interrupted pushes.
        extra_flags.append("--force")
    # Derive provider id and version from each wheel file name in dist/.
    for file in os.listdir(os.path.join(SOURCE_DIR_PATH, "dist")):
        if file.endswith(".whl"):
            match = re.match(r".*airflow_providers_(.*)-(.*)-py3.*", file)
            if match:
                provider = f"providers-{match.group(1).replace('_', '-')}"
                tag = f"{provider}/{match.group(2)}"
                get_console().print(f"[info]Creating tag: {tag}")
                run_command(
                    ["git", "tag", tag, *extra_flags, "-m", f"Release {release_date} of providers"],
                    check=True,
                )
                tags.append(tag)
    if tags:
        # Push all tags in a single command to minimize round-trips.
        push_result = run_command(
            ["git", "push", found_remote, *extra_flags, *tags],
            check=True,
        )
        if push_result.returncode == 0:
            get_console().print("\n[success]Tags pushed successfully.[/]")
@release_management_group.command(
    name="generate-constraints",
    help="Generates pinned constraint files with all extras from pyproject.toml in parallel.",
)
@option_python
@option_run_in_parallel
@option_parallelism
@option_skip_cleanup
@option_debug_resources
@option_python_versions
@option_airflow_constraints_mode_ci
@option_github_repository
@option_use_uv
@option_verbose
@option_dry_run
@option_answer
def generate_constraints(
    airflow_constraints_mode: str,
    debug_resources: bool,
    github_repository: str,
    parallelism: int,
    python: str,
    python_versions: str,
    run_in_parallel: bool,
    skip_cleanup: bool,
    use_uv: bool,
):
    """
    Generate pinned constraint files, either for a single python version or for all of
    them in parallel.

    Requires CI images built with --upgrade-to-newer-dependencies; the user is asked to
    confirm that first and, on refusal, is shown the exact build command to run.
    """
    perform_environment_checks()
    check_remote_ghcr_io_commands()
    fix_ownership_using_docker()
    cleanup_python_generated_files()
    # Constraints are only meaningful when the images were built with freshly upgraded
    # dependencies - confirm that before doing any work.
    if run_in_parallel:
        given_answer = user_confirm(
            f"Did you build all CI images {python_versions} with --upgrade-to-newer-dependencies flag set?",
        )
    else:
        given_answer = user_confirm(
            f"Did you build CI image {python} with --upgrade-to-newer-dependencies flag set?",
        )
    if given_answer != Answer.YES:
        # Show the user the exact command needed to build suitable images, then bail out.
        if run_in_parallel:
            get_console().print("\n[info]Use this command to build the images:[/]\n")
            get_console().print(
                f"     breeze ci-image build --run-in-parallel --python-versions '{python_versions}' "
                f"--upgrade-to-newer-dependencies\n"
            )
        else:
            shell_params = ShellParams(
                python=python,
                github_repository=github_repository,
            )
            get_console().print("\n[info]Use this command to build the image:[/]\n")
            get_console().print(
                f"     breeze ci-image build --python '{shell_params.python}' "
                f"--upgrade-to-newer-dependencies\n"
            )
        sys.exit(1)
    if run_in_parallel:
        python_version_list = get_python_version_list(python_versions)
        # One ShellParams per python version - each runs in its own compose project.
        shell_params_list = [
            ShellParams(
                airflow_constraints_mode=airflow_constraints_mode,
                github_repository=github_repository,
                python=python,
                use_uv=use_uv,
            )
            for python in python_version_list
        ]
        run_generate_constraints_in_parallel(
            debug_resources=debug_resources,
            include_success_outputs=True,
            parallelism=parallelism,
            python_version_list=python_version_list,
            shell_params_list=shell_params_list,
            skip_cleanup=skip_cleanup,
        )
        fix_ownership_using_docker()
    else:
        shell_params = ShellParams(
            airflow_constraints_mode=airflow_constraints_mode,
            github_repository=github_repository,
            python=python,
            use_uv=use_uv,
        )
        return_code, info = run_generate_constraints(
            shell_params=shell_params,
            output=None,
        )
        fix_ownership_using_docker()
        if return_code != 0:
            get_console().print(f"[error]There was an error when generating constraints: {info}[/]")
            sys.exit(return_code)
    list_generated_constraints(output=None)
# File name prefixes/patterns used to recognize provider distribution files in dist/.
SDIST_FILENAME_PREFIX = "apache_airflow_providers_"
WHEEL_FILENAME_PREFIX = "apache_airflow_providers-"

SDIST_FILENAME_PATTERN = re.compile(rf"{SDIST_FILENAME_PREFIX}(.*)-[0-9].*\.tar\.gz")
WHEEL_FILENAME_PATTERN = re.compile(rf"{WHEEL_FILENAME_PREFIX}(.*)-[0-9].*\.whl")


def _get_all_providers_in_dist(
    filename_prefix: str,
    filename_pattern: re.Pattern[str],
    filename_glob_suffix: str = "*.tar.gz",
) -> Generator[str, None, None]:
    """
    Yield provider ids (dot-separated) for all matching distribution files in dist/.

    :param filename_prefix: file name prefix to look for
    :param filename_pattern: pattern extracting the provider id from the file name
    :param filename_glob_suffix: glob suffix matching the expected file extension
        (defaults to sdist ``*.tar.gz`` for backwards compatibility)
    :raises SystemExit: when a matching file name cannot be parsed
    """
    for file in AIRFLOW_DIST_PATH.glob(f"{filename_prefix}{filename_glob_suffix}"):
        matched = filename_pattern.match(file.name)
        if not matched:
            raise SystemExit(f"Cannot parse provider package name from {file.name}")
        provider_id = matched.group(1).replace("_", ".")
        yield provider_id


def get_all_providers_in_dist(distribution_format: str, install_selected_providers: str) -> list[str]:
    """
    Returns all providers in dist, optionally filtered by install_selected_providers.

    :param distribution_format: package format to look for ("sdist" or "wheel")
    :param install_selected_providers: comma-separated list of providers to filter by
    :raises SystemExit: for an unknown distribution format
    """
    if distribution_format == "sdist":
        all_found_providers = list(
            _get_all_providers_in_dist(
                filename_prefix=SDIST_FILENAME_PREFIX,
                filename_pattern=SDIST_FILENAME_PATTERN,
                filename_glob_suffix="*.tar.gz",
            )
        )
    elif distribution_format == "wheel":
        # BUGFIX: the wheel branch previously globbed "*.tar.gz" too, so it could never
        # find any wheel files - wheels end with ".whl".
        all_found_providers = list(
            _get_all_providers_in_dist(
                filename_prefix=WHEEL_FILENAME_PREFIX,
                filename_pattern=WHEEL_FILENAME_PATTERN,
                filename_glob_suffix="*.whl",
            )
        )
    else:
        raise SystemExit(f"Unknown package format {distribution_format}")
    if install_selected_providers:
        filter_list = install_selected_providers.split(",")
        return [provider for provider in all_found_providers if provider in filter_list]
    return all_found_providers
def _run_command_for_providers(
    shell_params: ShellParams,
    list_of_providers: list[str],
    index: int,
    output: Output | None,
) -> tuple[int, str]:
    """Install one chunk of providers in its own compose project; return (rc, description)."""
    shell_params.install_selected_providers = " ".join(list_of_providers)
    compose_project = f"providers-{index}"
    shell_result = execute_command_in_shell(shell_params, project_name=compose_project, output=output)
    return shell_result.returncode, f"{list_of_providers}"
# Regexp matching pip output lines that indicate progress while installing sdist packages.
SDIST_INSTALL_PROGRESS_REGEXP = r"Processing .*|Requirement already satisfied:.*| Created wheel.*"
@release_management_group.command(
    name="install-provider-distributions",
    # BUGFIX: typo "distributiobs" -> "distributions" in user-facing help text.
    help="Installs provider distributions that can be found in dist.",
)
@option_airflow_constraints_mode_ci
@option_airflow_constraints_location
@option_airflow_constraints_reference
@option_airflow_extras
@option_clean_airflow_installation
@option_debug_resources
@option_dry_run
@option_github_repository
@option_install_airflow_with_constraints_default_true
@option_include_success_outputs
@option_install_selected_providers
@option_installation_distribution_format
@option_mount_sources
@option_parallelism
@option_providers_constraints_location
@option_providers_constraints_mode_ci
@option_providers_constraints_reference
@option_providers_skip_constraints
@option_python
@option_run_in_parallel
@option_skip_cleanup
@option_use_airflow_version
@option_allow_pre_releases
@option_use_distributions_from_dist
@option_verbose
def install_provider_distributions(
    airflow_constraints_location: str,
    airflow_constraints_mode: str,
    airflow_constraints_reference: str,
    install_airflow_with_constraints: bool,
    airflow_extras: str,
    allow_pre_releases: bool,
    clean_airflow_installation: bool,
    debug_resources: bool,
    github_repository: str,
    include_success_outputs: bool,
    install_selected_providers: str,
    mount_sources: str,
    distribution_format: str,
    providers_constraints_location: str,
    providers_constraints_mode: str,
    providers_constraints_reference: str,
    providers_skip_constraints: bool,
    python: str,
    parallelism: int,
    run_in_parallel: bool,
    skip_cleanup: bool,
    use_airflow_version: str | None,
    use_distributions_from_dist: bool,
):
    """
    Install provider distributions found in dist/ into the CI image.

    In parallel mode, providers are split into chunks (one per worker); each chunk is
    extended with the direct downstream dependencies of its members so that unreleased
    dependency versions are also installed from dist rather than from PyPI.
    """
    perform_environment_checks()
    fix_ownership_using_docker()
    cleanup_python_generated_files()
    shell_params = ShellParams(
        airflow_constraints_location=airflow_constraints_location,
        airflow_constraints_mode=airflow_constraints_mode,
        airflow_constraints_reference=airflow_constraints_reference,
        airflow_extras=airflow_extras,
        install_airflow_with_constraints=install_airflow_with_constraints,
        allow_pre_releases=allow_pre_releases,
        # We just want to install the providers by entrypoint
        # we do not need to run any command in the container
        extra_args=("exit 0",),
        clean_airflow_installation=clean_airflow_installation,
        github_repository=github_repository,
        install_selected_providers=install_selected_providers,
        mount_sources=mount_sources,
        distribution_format=distribution_format,
        providers_constraints_location=providers_constraints_location,
        providers_constraints_mode=providers_constraints_mode,
        providers_constraints_reference=providers_constraints_reference,
        providers_skip_constraints=providers_skip_constraints,
        python=python,
        use_airflow_version=use_airflow_version,
        use_distributions_from_dist=use_distributions_from_dist,
    )
    rebuild_or_pull_ci_image_if_needed(command_params=shell_params)
    if run_in_parallel:
        list_of_all_providers = get_all_providers_in_dist(
            distribution_format=distribution_format, install_selected_providers=install_selected_providers
        )
        get_console().print(
            f"[info]Splitting {len(list_of_all_providers)} providers into max {parallelism} chunks"
        )
        # Round-robin distribution (stride slicing) balances chunk sizes.
        provider_chunks = [sorted(list_of_all_providers[i::parallelism]) for i in range(parallelism)]
        # filter out empty ones
        provider_chunks = [chunk for chunk in provider_chunks if chunk]
        if not provider_chunks:
            get_console().print("[info]No providers to install")
            sys.exit(1)
        total_num_providers = 0
        for index, chunk in enumerate(provider_chunks):
            get_console().print(f"Chunk {index}: {chunk} ({len(chunk)} providers)")
            total_num_providers += len(chunk)
        # For every chunk make sure that all direct dependencies are installed as well
        # because there might be new version of the downstream dependency that is not
        # yet released in PyPI, so we need to make sure it is installed from dist
        for chunk in provider_chunks:
            for provider in chunk.copy():
                downstream_dependencies = get_related_providers(
                    provider, upstream_dependencies=False, downstream_dependencies=True
                )
                for dependency in downstream_dependencies:
                    if dependency not in chunk:
                        chunk.append(dependency)
        # Sanity check computed BEFORE dependency extension - the chunks must cover
        # every provider exactly once.
        if len(list_of_all_providers) != total_num_providers:
            msg = (
                f"Total providers {total_num_providers} is different "
                f"than {len(list_of_all_providers)} (just to be sure"
                f" no rounding errors crippled in)"
            )
            raise RuntimeError(msg)
        parallelism = min(parallelism, len(provider_chunks))
        with ci_group(f"Installing providers in {parallelism} chunks"):
            all_params = [f"Chunk {n}" for n in range(parallelism)]
            with run_with_pool(
                parallelism=parallelism,
                all_params=all_params,
                debug_resources=debug_resources,
                progress_matcher=GenericRegexpProgressMatcher(
                    regexp=SDIST_INSTALL_PROGRESS_REGEXP, lines_to_search=10
                ),
            ) as (pool, outputs):
                results = [
                    pool.apply_async(
                        _run_command_for_providers,
                        kwds={
                            "shell_params": shell_params,
                            "list_of_providers": list_of_providers,
                            "output": outputs[index],
                            "index": index,
                        },
                    )
                    for index, list_of_providers in enumerate(provider_chunks)
                ]
                check_async_run_results(
                    results=results,
                    success_message="All packages installed successfully",
                    outputs=outputs,
                    include_success_outputs=include_success_outputs,
                    skip_cleanup=skip_cleanup,
                )
    else:
        result_command = execute_command_in_shell(shell_params, project_name="providers")
        fix_ownership_using_docker()
        sys.exit(result_command.returncode)
@release_management_group.command(
    name="verify-provider-distributions",
    help="Verifies if all provider code is following expectations for providers.",
)
@option_airflow_constraints_mode_ci
@option_airflow_constraints_location
@option_airflow_constraints_reference
@option_airflow_extras
@option_clean_airflow_installation
@option_dry_run
@option_github_repository
@option_install_airflow_with_constraints
@option_install_selected_providers
@option_installation_distribution_format
@option_mount_sources
@option_python
@option_providers_constraints_location
@option_providers_constraints_mode_ci
@option_providers_constraints_reference
@option_providers_skip_constraints
@option_use_airflow_version
@option_allow_pre_releases
@option_use_distributions_from_dist
@option_verbose
def verify_provider_distributions(
    airflow_constraints_location: str,
    airflow_constraints_mode: str,
    airflow_constraints_reference: str,
    airflow_extras: str,
    clean_airflow_installation: bool,
    github_repository: str,
    install_airflow_with_constraints: bool,
    install_selected_providers: str,
    mount_sources: str,
    distribution_format: str,
    providers_constraints_location: str,
    providers_constraints_mode: str,
    providers_constraints_reference: str,
    python: str,
    use_airflow_version: str | None,
    allow_pre_releases: bool,
    use_distributions_from_dist: bool,
    providers_skip_constraints: bool,
):
    """Verify provider distributions inside the CI image.

    Rebuilds or pulls the CI image if needed and runs
    ``scripts/in_container/verify_providers.py`` in the "providers" compose project,
    exiting with that command's return code.
    """
    # Selecting individual providers only makes sense when installing from dist files,
    # so force that mode on when a selection was given.
    if install_selected_providers and not use_distributions_from_dist:
        get_console().print("Forcing use_distributions_from_dist as installing selected_providers is set")
        use_distributions_from_dist = True
    perform_environment_checks()
    fix_ownership_using_docker()
    cleanup_python_generated_files()
    shell_params = ShellParams(
        airflow_constraints_location=airflow_constraints_location,
        airflow_constraints_mode=airflow_constraints_mode,
        airflow_constraints_reference=airflow_constraints_reference,
        airflow_extras=airflow_extras,
        allow_pre_releases=allow_pre_releases,
        clean_airflow_installation=clean_airflow_installation,
        github_repository=github_repository,
        install_airflow_with_constraints=install_airflow_with_constraints,
        mount_sources=mount_sources,
        distribution_format=distribution_format,
        providers_constraints_location=providers_constraints_location,
        providers_constraints_mode=providers_constraints_mode,
        providers_constraints_reference=providers_constraints_reference,
        providers_skip_constraints=providers_skip_constraints,
        python=python,
        use_airflow_version=use_airflow_version,
        use_distributions_from_dist=use_distributions_from_dist,
    )
    rebuild_or_pull_ci_image_if_needed(command_params=shell_params)
    result_command = execute_command_in_shell(
        shell_params,
        project_name="providers",
        command="python /opt/airflow/scripts/in_container/verify_providers.py",
    )
    # Restore host ownership of files the container may have created, then propagate
    # the verification result as the process exit code.
    fix_ownership_using_docker()
    sys.exit(result_command.returncode)
def convert_build_args_dict_to_array_of_args(build_args: dict[str, str]) -> list[str]:
    """Convert a build-args mapping into the flat ``--build-arg KEY=VALUE`` CLI form.

    :param build_args: mapping of docker build-arg name to its value
    :return: flat list alternating ``--build-arg`` flags and ``KEY=VALUE`` entries,
        suitable for splicing into a ``docker buildx build`` command line
    """
    # Flatten each (key, value) pair into the two consecutive CLI tokens.
    return [
        token for key, value in build_args.items() for token in ("--build-arg", f"{key}={value}")
    ]
def alias_image(image_from: str, image_to: str):
    """Alias ``image_from`` as ``image_to`` by copying the image between registry tags."""
    get_console().print(f"[info]Creating {image_to} alias for {image_from}[/]")
    regctl_copy_command = [
        "regctl",
        "image",
        "copy",
        "--force-recursive",
        "--digest-tags",
        image_from,
        image_to,
    ]
    run_command(regctl_copy_command)
def run_docs_publishing(
    package_name: str,
    airflow_site_directory: str,
    override_versioned: bool,
    verbose: bool,
    output: Output | None,
) -> tuple[int, str]:
    """Publish already-built docs of one package into the airflow-site checkout.

    :return: ``(return_code, message)`` tuple produced by ``DocsPublisher.publish``.
    """
    from airflow_breeze.utils.docs_publisher import DocsPublisher

    publisher = DocsPublisher(package_name=package_name, output=output, verbose=verbose)
    return publisher.publish(
        override_versioned=override_versioned,
        airflow_site_dir=airflow_site_directory,
    )
# Regexp fed to GenericRegexpProgressMatcher so the parallel pool can detect
# docs-publishing progress lines in worker output.
PUBLISHING_DOCS_PROGRESS_MATCHER = r"Publishing docs|Copy directory"
def run_publish_docs_in_parallel(
    distributions_list: tuple[str, ...],
    airflow_site_directory: str,
    override_versioned: bool,
    parallelism: int,
    debug_resources: bool,
):
    """Run docs publishing in parallel and print a published/skipped summary.

    :param distributions_list: long package names to publish docs for
    :param airflow_site_directory: checkout of the airflow-site repo to publish into
    :param override_versioned: whether to overwrite already-published versioned dirs
    :param parallelism: number of worker processes in the pool
    :param debug_resources: whether to print resource usage while the pool runs
    """
    success_entries = []
    skipped_entries = []
    with ci_group("Publishing docs for packages"):
        all_params = [f"Publishing docs {package_name}" for package_name in distributions_list]
        with run_with_pool(
            parallelism=parallelism,
            all_params=all_params,
            debug_resources=debug_resources,
            progress_matcher=GenericRegexpProgressMatcher(
                regexp=PUBLISHING_DOCS_PROGRESS_MATCHER, lines_to_search=6
            ),
        ) as (pool, outputs):
            results = [
                pool.apply_async(
                    run_docs_publishing,
                    kwds={
                        "package_name": package_name,
                        "airflow_site_directory": airflow_site_directory,
                        "override_versioned": override_versioned,
                        "output": outputs[index],
                        "verbose": get_verbose(),
                    },
                )
                for index, package_name in enumerate(distributions_list)
            ]
            # Iterate over the results and collect success and skipped entries.
            # run_docs_publishing returns (return_code, message); non-zero means skipped.
            for result in results:
                return_code, message = result.get()
                if return_code == 0:
                    success_entries.append(message)
                else:
                    skipped_entries.append(message)
    get_console().print("[blue]Summary:")
    need_rule = False
    if len(success_entries):
        get_console().print("[success]Packages published:")
        for entry in success_entries:
            get_console().print(f"[success]{entry}")
        need_rule = True
    if need_rule:
        get_console().rule()
    if len(skipped_entries):
        get_console().print("\n[warning]Packages skipped:")
        for entry in skipped_entries:
            get_console().print(f"[warning]{entry}")
@release_management_group.command(
    name="publish-docs",
    help="Command to publish generated documentation to airflow-site",
)
@argument_doc_packages
@option_airflow_site_directory
@option_debug_resources
@option_dry_run
@option_include_not_ready_providers
@option_include_removed_providers
@click.option("-s", "--override-versioned", help="Overrides versioned directories.", is_flag=True)
@click.option(
    "--package-filter",
    help="Filter(s) to use more than one can be specified. You can use glob pattern matching the "
    "full package name, for example `apache-airflow-providers-*`. Useful when you want to select "
    "several similarly named packages together.",
    type=str,
    multiple=True,
)
@click.option(
    "--distributions-list",
    envvar="DISTRIBUTIONS_LIST",
    type=str,
    help="Optional, contains space separated list of package ids that are processed for documentation "
    "building, and document publishing. It is an easier alternative to adding individual packages as"
    " arguments to every command. This overrides the packages passed as arguments.",
)
@option_parallelism
@option_run_in_parallel
@option_verbose
def publish_docs(
    airflow_site_directory: str,
    debug_resources: bool,
    doc_packages: tuple[str, ...],
    include_not_ready_providers: bool,
    include_removed_providers: bool,
    override_versioned: bool,
    package_filter: tuple[str, ...],
    distributions_list: str,
    parallelism: int,
    run_in_parallel: bool,
):
    """Publishes documentation to airflow-site.

    Resolves the list of packages (arguments, or DISTRIBUTIONS_LIST override, plus
    glob filters) and publishes their built docs into the airflow-site checkout,
    either sequentially or via a parallel worker pool.
    """
    if not os.path.isdir(airflow_site_directory):
        get_console().print(
            "\n[error]location pointed by airflow_site_dir is not valid. "
            "Provide the path of cloned airflow-site repo\n"
        )
        # Bail out early: publishing into a non-existent checkout would only fail later
        # with a confusing error (add-back-references already exits here too).
        sys.exit(1)
    packages_list_as_tuple: tuple[str, ...] = ()
    if distributions_list and len(distributions_list):
        get_console().print(
            f"\n[info]Populating provider list from DISTRIBUTIONS_LIST env as {distributions_list}"
        )
        # Override doc_packages with values from DISTRIBUTIONS_LIST
        packages_list_as_tuple = tuple(distributions_list.split(" "))
    if doc_packages and packages_list_as_tuple:
        get_console().print(
            f"[warning]Both package arguments and --distributions-list / DISTRIBUTIONS_LIST passed. "
            f"Overriding to {packages_list_as_tuple}"
        )
    doc_packages = packages_list_as_tuple or doc_packages
    current_packages = find_matching_long_package_names(
        short_packages=expand_all_provider_distributions(
            short_doc_packages=doc_packages,
            include_removed=include_removed_providers,
            include_not_ready=include_not_ready_providers,
        ),
        filters=package_filter,
    )
    print(f"Publishing docs for {len(current_packages)} package(s)")
    for pkg in current_packages:
        print(f" - {pkg}")
    print()
    if run_in_parallel:
        run_publish_docs_in_parallel(
            distributions_list=current_packages,
            parallelism=parallelism,
            debug_resources=debug_resources,
            airflow_site_directory=airflow_site_directory,
            override_versioned=override_versioned,
        )
    else:
        # Sequential path: publish one package at a time and collect the outcome messages.
        success_entries = []
        skipped_entries = []
        for package_name in current_packages:
            return_code, message = run_docs_publishing(
                package_name, airflow_site_directory, override_versioned, verbose=get_verbose(), output=None
            )
            if return_code == 0:
                success_entries.append(message)
            else:
                skipped_entries.append(message)
        get_console().print("[blue]Summary:")
        need_rule = False
        if len(success_entries):
            get_console().print("[success]Packages published:")
            for entry in success_entries:
                get_console().print(f"[success]{entry}")
            need_rule = True
        if need_rule:
            get_console().rule()
        if len(skipped_entries):
            get_console().print("\n[warning]Packages skipped:")
            for entry in skipped_entries:
                get_console().print(f"[warning]{entry}")
@release_management_group.command(
    name="add-back-references",
    help="Command to add back references for documentation to make it backward compatible.",
)
@option_airflow_site_directory
@option_include_not_ready_providers
@option_include_removed_providers
@argument_doc_packages
@option_dry_run
@option_verbose
@click.option(
    "--head-ref",
    help="The branch of redirect files to use.",
    default=AIRFLOW_BRANCH,
    envvar="HEAD_REF",
    show_default=True,
)
@click.option(
    "--head-repo",
    help="The repository of redirect files to use.",
    default=APACHE_AIRFLOW_GITHUB_REPOSITORY,
    envvar="HEAD_REPO",
    show_default=True,
)
def add_back_references(
    airflow_site_directory: str,
    include_not_ready_providers: bool,
    include_removed_providers: bool,
    doc_packages: tuple[str, ...],
    head_ref: str,
    head_repo: str,
):
    """Adds back references for documentation generated by build-docs and publish-docs."""
    # NOTE: this string used to sit after the statements below, where it was a dead
    # expression rather than a docstring - moved to the canonical position.
    # head_ref and head_repo could be empty in canary runs - fall back to defaults.
    if head_ref == "":
        head_ref = AIRFLOW_BRANCH
    if head_repo == "":
        head_repo = APACHE_AIRFLOW_GITHUB_REPOSITORY
    site_path = Path(airflow_site_directory)
    if not site_path.is_dir():
        get_console().print(
            "\n[error]location pointed by airflow_site_dir is not valid. "
            "Provide the path of cloned airflow-site repo\n"
        )
        sys.exit(1)
    if not doc_packages:
        get_console().print(
            "\n[error]You need to specify at least one package to generate back references for\n"
        )
        sys.exit(1)
    start_generating_back_references(
        site_path,
        list(
            expand_all_provider_distributions(
                short_doc_packages=doc_packages,
                include_removed=include_removed_providers,
                include_not_ready=include_not_ready_providers,
            )
        ),
        head_ref=head_ref,
        head_repo=head_repo,
    )
@release_management_group.command(
    name="clean-old-provider-artifacts",
    help="Cleans the old provider artifacts",
)
@option_directory
@option_dry_run
@option_verbose
def clean_old_provider_artifacts(
    directory: str,
):
    """Clean up old airflow provider artifacts.

    For every artifact suffix, keeps only the newest version of each provider
    (and one ``-source`` artifact) in the release SVN folder, removing older
    files with ``svn rm``.
    """
    cleanup_suffixes = [
        ".tar.gz",
        ".tar.gz.sha512",
        ".tar.gz.asc",
        "-py3-none-any.whl",
        "-py3-none-any.whl.sha512",
        "-py3-none-any.whl.asc",
    ]
    for suffix in cleanup_suffixes:
        get_console().print(f"[info]Running provider cleanup for suffix: {suffix}[/]")
        package_types_dicts: dict[str, list[VersionedFile]] = defaultdict(list)
        os.chdir(directory)
        # Group matching artifacts by package "type" (base name without version/suffix);
        # -source tarballs carry a date-version and are parsed separately.
        for file in glob.glob(f"*{suffix}"):
            if "-source.tar.gz" in file:
                versioned_file = split_date_version_and_suffix(file, "-source" + suffix)
            else:
                versioned_file = split_version_and_suffix(file, suffix)
            package_types_dicts[versioned_file.type].append(versioned_file)
        # Sort each group so the newest version ends up last.
        for package_types in package_types_dicts.values():
            package_types.sort(key=operator.attrgetter("comparable_version"))
        for package_types in package_types_dicts.values():
            if len(package_types) == 1:
                versioned_file = package_types[0]
                get_console().print(
                    f"[success]Leaving the only version: "
                    f"{versioned_file.base + versioned_file.version + versioned_file.suffix}[/]"
                )
            # Leave only last version from each type
            for versioned_file in package_types[:-1]:
                get_console().print(
                    f"[warning]Removing {versioned_file.file_name} as they are older than remaining file: "
                    f"{package_types[-1].file_name}[/]"
                )
                command = ["svn", "rm", versioned_file.file_name]
                run_command(command, check=False)
def alias_images(
    airflow_version: str,
    dockerhub_repo: str,
    python_versions: list[str],
    image_prefix: str,
    skip_latest: bool = False,
):
    """Create alias tags for freshly pushed per-python production images.

    For the default python version, aliases the ``-pythonX.Y`` tag to the version-only
    tag. Unless ``skip_latest`` is set, also creates the corresponding ``latest*`` tags.

    :param image_prefix: "" for regular images or "slim-" for slim images
    """
    get_console().print("[info]Waiting for a few seconds for the new images to refresh.[/]")
    # Give the registry a moment to make the just-pushed tags visible.
    time.sleep(10)
    get_console().print("[info]Aliasing images with links to the newly created images.[/]")
    for python in python_versions:
        # Alias the default python version image to point to the non-python-suffixed tag
        if python == DEFAULT_PYTHON_MAJOR_MINOR_VERSION_FOR_IMAGES:
            get_console().print(
                f"[info]Aliasing the {image_prefix}{airflow_version}-python{python} "
                f"version with {image_prefix}{airflow_version}[/]"
            )
            alias_image(
                f"{dockerhub_repo}:{image_prefix}{airflow_version}-python{python}",
                f"{dockerhub_repo}:{image_prefix}{airflow_version}",
            )
        if not skip_latest:
            # if we are tagging latest images, we also need to alias the non-version images
            get_console().print(
                f"[info]Aliasing {image_prefix}{airflow_version}-python{python} "
                f"version with {image_prefix}latest-python{python}[/]"
            )
            alias_image(
                f"{dockerhub_repo}:{image_prefix}{airflow_version}-python{python}",
                f"{dockerhub_repo}:{image_prefix}latest-python{python}",
            )
            if image_prefix == "":
                alias_image(
                    f"{dockerhub_repo}:{airflow_version}-python{python}",
                    f"{dockerhub_repo}:latest-python{python}",
                )
            if python == DEFAULT_PYTHON_MAJOR_MINOR_VERSION_FOR_IMAGES:
                alias_image(
                    f"{dockerhub_repo}:{image_prefix}{airflow_version}",
                    f"{dockerhub_repo}:{image_prefix}latest",
                )
def check_skip_latest(airflow_version, skip_latest):
    """Decide whether the 'latest' tags should be skipped for this release.

    Pre-release versions always skip latest tagging; otherwise the user's choice is
    honored and, when latest tags will be produced, the buildx plugin is verified.
    """
    if is_pre_release(airflow_version):
        get_console().print(
            f"[warning]Skipping latest image tagging as this is a pre-release version: {airflow_version}"
        )
        return True
    if skip_latest:
        get_console().print("[info]Skipping latest image tagging as user requested it.[/]")
        return skip_latest
    get_console().print(
        "[info]Also tagging the images with latest tags as this is release version.[/]"
    )
    check_docker_buildx_plugin()
    return skip_latest
@release_management_group.command(
    name="release-prod-images", help="Release production images to DockerHub (needs DockerHub permissions)."
)
@click.option("--airflow-version", required=True, help="Airflow version to release (2.3.0, 2.3.0rc1 etc.)")
@option_dry_run
@click.option(
    "--dockerhub-repo",
    default=APACHE_AIRFLOW_GITHUB_REPOSITORY,
    show_default=True,
    envvar="DOCKERHUB_REPO",
    help="DockerHub repository for the images",
)
@option_python_no_default
@click.option(
    "--platform",
    type=BetterChoice(ALLOWED_PLATFORMS),
    default=MULTI_PLATFORM,
    show_default=True,
    envvar="PLATFORM",
    help="Platform to build images for (if not specified, multiplatform images will be built.",
)
@option_commit_sha
@click.option(
    "--skip-latest",
    is_flag=True,
    envvar="SKIP_LATEST",
    help="Whether to skip publishing the latest images (so that 'latest' images are not updated). "
    "This should only be used if you release image for previous branches. Automatically set when "
    "rc/alpha/beta images are built.",
)
@click.option(
    "--include-pre-release",
    is_flag=True,
    envvar="INCLUDE_PRE_RELEASE",
    help="Whether to Include pre-release distributions from PyPI when building images. Useful when we "
    "want to build an RC image with RC provider versions.",
)
@click.option(
    "--slim-images",
    is_flag=True,
    help="Whether to prepare slim images instead of the regular ones.",
    envvar="SLIM_IMAGES",
)
@click.option(
    "--metadata-folder",
    type=click.Path(dir_okay=True, file_okay=False, writable=True, path_type=Path),
    envvar="METADATA_FOLDER",
    help="Folder to write the build metadata to. When this option is specified the image is pushed to registry"
    "only by digests not by the tag because we are going to merge several images in multi-platform one.",
)
@option_verbose
def release_prod_images(
    airflow_version: str,
    dockerhub_repo: str,
    slim_images: bool,
    platform: str,
    python: str | None,
    commit_sha: str | None,
    skip_latest: bool,
    include_pre_release: bool,
    metadata_folder: Path | None,
):
    """Build and push production (regular or slim) images for a released Airflow version.

    One image is built per Python version (or only for --python when given). With
    --metadata-folder, images are pushed by digest only and build metadata is stored
    there so ``merge-prod-images`` can later combine per-platform digests into
    multi-platform images; otherwise images are pushed by tag and aliases created.
    """
    perform_environment_checks()
    check_remote_ghcr_io_commands()
    skip_latest = check_skip_latest(airflow_version, skip_latest)
    check_airflow_cache_builder_configured()
    from packaging.version import Version

    # Pre-release Airflow versions always pull in pre-release provider distributions.
    include_pre_release = include_pre_release or Version(airflow_version).is_prerelease
    if include_pre_release:
        get_console().print("[warning]Including pre-releases when considering dependencies.[/]")
    if metadata_folder:
        # Push-by-digest mode is per-platform: the multi-platform image is assembled
        # later from the digests, so a single --platform must be chosen here.
        if platform == MULTI_PLATFORM:
            get_console().print(
                "[error]Cannot use metadata folder with multi-platform image. Use "
                "--platform to limit it to a single platform only[/]"
            )
            sys.exit(1)
        get_console().print(
            f"[info]Will push images to registry only by digests and store metadata files in "
            f"the {metadata_folder}[/]"
        )
        if not metadata_folder.exists():
            metadata_folder.mkdir(parents=True)
    else:
        check_regctl_installed()
    python_versions = CURRENT_PYTHON_MAJOR_MINOR_VERSIONS if python is None else [python]
    if slim_images:
        image_prefix = "slim-"
        image_type = "slim"
    else:
        image_prefix = ""
        image_type = "regular"
    for python in python_versions:
        # Build args shared by slim and regular images; the patch-level python version
        # is substituted when a mapping exists, otherwise the major.minor one is used.
        build_args = {
            "AIRFLOW_CONSTRAINTS": "constraints-no-providers",
            "BASE_IMAGE": "debian:bookworm-slim",
            "AIRFLOW_PYTHON_VERSION": ALL_PYTHON_VERSION_TO_PATCHLEVEL_VERSION.get(python, python),
            "AIRFLOW_VERSION": airflow_version,
            "INCLUDE_PRE_RELEASE": "true" if include_pre_release else "false",
            "INSTALL_DISTRIBUTIONS_FROM_CONTEXT": "false",
            "DOCKER_CONTEXT_FILES": "./docker-context-files",
        }
        if commit_sha:
            build_args["COMMIT_SHA"] = commit_sha
        if slim_images:
            # Slim images are built with no extras at all.
            build_args["AIRFLOW_EXTRAS"] = ""
        get_console().print(f"[info]Building {image_type} {airflow_version} image for Python {python}[/]")
        python_build_args = deepcopy(build_args)
        image_name = f"{dockerhub_repo}:{image_prefix}{airflow_version}-python{python}"
        docker_buildx_command = [
            "docker",
            "buildx",
            "build",
            "--builder",
            "airflow_cache",
            *convert_build_args_dict_to_array_of_args(build_args=python_build_args),
            "--platform",
            platform,
            "--push",
        ]
        metadata_file: Path | None = None
        if metadata_folder:
            metadata_file = (
                metadata_folder
                / f"metadata-{airflow_version}-{image_prefix}{platform.replace('/', '_')}-{python}.json"
            )
            # Push by digest only and record the digest in the metadata file.
            docker_buildx_command += [
                "-t",
                dockerhub_repo,
                "--metadata-file",
                str(metadata_file),
                "--output",
                "type=image,push-by-digest=true,name-canonical=true",
            ]
        else:
            docker_buildx_command += [
                "-t",
                image_name,
            ]
        docker_buildx_command += ["."]
        run_command(docker_buildx_command)
        if metadata_file:
            get_console().print(f"[green]Metadata file stored in {metadata_file}")
        if python == DEFAULT_PYTHON_MAJOR_MINOR_VERSION_FOR_IMAGES and not metadata_file:
            get_console().print(
                f"[info]Aliasing the latest {python} version to {image_prefix}{airflow_version}[/]"
            )
            alias_image(
                image_name,
                f"{dockerhub_repo}:{image_prefix}{airflow_version}",
            )
    if not metadata_folder:
        alias_images(airflow_version, dockerhub_repo, python_versions, image_prefix, skip_latest)
@release_management_group.command(
    name="merge-prod-images",
    help="Merge production images in DockerHub based on digest files (needs DockerHub permissions).",
)
@click.option("--airflow-version", required=True, help="Airflow version to release (2.3.0, 2.3.0rc1 etc.)")
@option_dry_run
@click.option(
    "--dockerhub-repo",
    default=APACHE_AIRFLOW_GITHUB_REPOSITORY,
    show_default=True,
    envvar="DOCKERHUB_REPO",
    help="DockerHub repository for the images",
)
@option_python_no_default
@click.option(
    "--skip-latest",
    is_flag=True,
    envvar="SKIP_LATEST",
    help="Whether to skip publishing the latest images (so that 'latest' images are not updated). "
    "This should only be used if you release image for previous branches. Automatically set when "
    "rc/alpha/beta images are built.",
)
@click.option(
    "--slim-images",
    is_flag=True,
    envvar="SLIM_IMAGES",
    help="Whether to prepare slim images instead of the regular ones.",
)
@click.option(
    "--metadata-folder",
    type=click.Path(exists=True, dir_okay=True, file_okay=False, writable=True, path_type=Path),
    envvar="METADATA_FOLDER",
    help="Folder to write the build metadata to. When this option is specified the image is pushed to registry"
    "only by digests not by the tag because we are going to merge several images in multi-platform one.",
    required=True,
)
@option_verbose
def merge_prod_images(
    airflow_version: str,
    dockerhub_repo: str,
    slim_images: bool,
    python: str | None,
    skip_latest: bool,
    metadata_folder: Path,
):
    """Merge per-platform image digests into multi-platform images and alias them.

    Reads the metadata files produced by ``release-prod-images --metadata-folder``,
    merges the per-platform digests for each Python version into one multi-platform
    image via ``docker buildx imagetools create``, then creates alias tags.
    """
    # Hoisted out of the per-python loop where it was needlessly re-executed.
    import json

    perform_environment_checks()
    check_remote_ghcr_io_commands()
    check_docker_buildx_plugin()
    check_regctl_installed()
    skip_latest = check_skip_latest(airflow_version, skip_latest)
    python_versions = CURRENT_PYTHON_MAJOR_MINOR_VERSIONS if python is None else [python]
    if slim_images:
        image_prefix = "slim-"
    else:
        image_prefix = ""
    for python in python_versions:
        metadata_files = list(
            metadata_folder.rglob(f"metadata-{airflow_version}-{image_prefix}linux*-{python}.json")
        )
        image_name = f"{dockerhub_repo}:{image_prefix}{airflow_version}-python{python}"
        metadata_array = [json.loads(file.read_text()) for file in metadata_files]
        # Each metadata file holds the digest of one per-platform image push.
        digests_to_merge = [
            dockerhub_repo + "@" + metadata_content["containerimage.digest"]
            for metadata_content in metadata_array
        ]
        get_console().print(f"[info]Merging {image_name} file from digests found in {metadata_files}[/]")
        get_console().print(f"[info]Digests to merge: {digests_to_merge}[/]")
        imagetool_command = [
            "docker",
            "buildx",
            "imagetools",
            "create",
            *digests_to_merge,
            "-t",
            image_name,
        ]
        run_command(imagetool_command)
    alias_images(
        airflow_version,
        dockerhub_repo,
        python_versions,
        image_prefix=image_prefix,
        skip_latest=skip_latest,
    )
def is_package_in_dist(dist_files: list[str], package: str) -> bool:
    """Check whether an artifact for ``package`` has been prepared in the dist folder.

    Matches both the sdist (underscored) and legacy dashed file-name prefixes.
    """
    underscored = package.replace(".", "_")
    dashed = package.replace(".", "-")
    known_prefixes = (
        f"apache_airflow_providers_{underscored}",
        f"apache-airflow-providers-{dashed}",
    )
    for dist_file in dist_files:
        if dist_file.startswith(known_prefixes):
            return True
    return False
# Matches "MAJOR.MINOR.PATCH<suffix>" version strings; group 4 captures any
# pre-release suffix such as "rc1" (empty string for final releases).
VERSION_MATCH = re.compile(r"([0-9]+)\.([0-9]+)\.([0-9]+)(.*)")
def get_suffix_from_package_in_dist(dist_files: list[str], package: str) -> str | None:
    """Return the version suffix (e.g. "rc1") of the package's sdist in the dist folder.

    Returns ``None`` when no matching sdist with a parseable version is found; a final
    release yields an empty string.
    """
    sdist_prefix = f"apache_airflow_providers_{package.replace('.', '_')}"
    for filename in dist_files:
        if not (filename.startswith(sdist_prefix) and filename.endswith(".tar.gz")):
            continue
        base_name = filename[: -len(".tar.gz")]
        # The version is the last dash-separated component of the sdist base name.
        version = base_name.split("-")[-1]
        match = VERSION_MATCH.match(version)
        if match:
            return match.group(4)
    return None
def get_prs_for_package(provider_id: str) -> list[int]:
    """Extract the PR numbers listed in the changelog section of the provider's latest version.

    Scans the provider changelog for the section headed by ``versions[0]`` from
    provider.yaml and collects every ``(#1234)`` reference until the next version
    header (or the "excluded changes" marker).

    :param provider_id: short provider id (e.g. "google")
    :return: PR numbers mentioned for the current release
    :raises RuntimeError: when the provider has no provider.yaml metadata
    """
    # Changelog entries end with an RST literal like ``... (#1234)``
    pr_matcher = re.compile(r".*\(#([0-9]*)\)``$")
    prs = []
    provider_yaml_dict = get_provider_distributions_metadata().get(provider_id)
    if not provider_yaml_dict:
        raise RuntimeError(f"The provider id {provider_id} does not have provider.yaml file")
    current_release_version = provider_yaml_dict["versions"][0]
    provider_details = get_provider_details(provider_id)
    changelog_lines = provider_details.changelog_path.read_text().splitlines()
    extract_prs = False
    skip_line = False
    for line in changelog_lines:
        if skip_line:
            # Skip first "....." header
            skip_line = False
        elif line.strip() == current_release_version:
            extract_prs = True
            skip_line = True
        elif extract_prs:
            if len(line) > 1 and all(c == "." for c in line.strip()):
                # Header for next version reached
                break
            if line.startswith(".. Below changes are excluded from the changelog"):
                # The reminder of PRs is not important skipping it
                break
            match_result = pr_matcher.match(line.strip())
            if match_result:
                prs.append(int(match_result.group(1)))
    return prs
def create_github_issue_url(title: str, body: str, labels: Iterable[str]) -> str:
    """
    Creates URL to create the issue with title, body and labels.

    :param title: issue title
    :param body: issue body
    :param labels: labels for the issue
    :return: URL to use to create the issue
    """
    from urllib.parse import quote

    label_param = quote(",".join(labels))
    title_param = quote(title)
    body_param = quote(body)
    base_url = "https://github.com/apache/airflow/issues/new"
    return f"{base_url}?labels={label_param}&title={title_param}&body={body_param}"
def get_commented_out_prs_from_provider_changelogs() -> list[int]:
    """
    Returns list of PRs that are commented out in the changelog.

    Walks every provider changelog.rst, tracks when the parser is inside an
    "excluded changes" comment section, and collects the ``(#1234)`` references of
    bullet lines found there.

    :return: sorted list of PR numbers that appear only in comments in changelog.rst
        files in "providers" dir
    """
    pr_matcher = re.compile(r".*\(#([0-9]+)\).*")
    commented_prs = set()
    # Get all provider distributions
    provider_distributions_metadata = get_provider_distributions_metadata()
    for provider_id in provider_distributions_metadata.keys():
        provider_details = get_provider_details(provider_id)
        changelog_path = provider_details.changelog_path
        print(f"[info]Checking changelog {changelog_path} for PRs to be excluded automatically.")
        if not changelog_path.exists():
            continue
        changelog_lines = changelog_path.read_text().splitlines()
        in_excluded_section = False
        for line in changelog_lines:
            # Check if we're entering an excluded/commented section
            if line.strip().startswith(
                ".. Below changes are excluded from the changelog"
            ) or line.strip().startswith(".. Review and move the new changes"):
                in_excluded_section = True
                continue
            # Check if we're exiting the excluded section (new version header or regular content)
            # Version headers are lines that contain only dots (like "4.10.1" followed by "......")
            # Or lines that start with actual content sections like "Misc", "Features", etc.
            if (
                in_excluded_section
                and line
                and not line.strip().startswith("..")
                and not line.strip().startswith("*")
            ):
                # end excluded section with empty line
                # NOTE(review): this branch only triggers on non-empty lines, so the
                # line.strip() == "" test below looks unreachable - confirm intent.
                if line.strip() == "":
                    in_excluded_section = False
            # Extract PRs from excluded sections
            if in_excluded_section and line.strip().startswith("*"):
                match_result = pr_matcher.search(line)
                if match_result:
                    commented_prs.add(int(match_result.group(1)))
    return sorted(commented_prs)
@release_management_group.command(
    name="generate-issue-content-providers", help="Generates content for issue to test the release."
)
@click.option("--disable-progress", is_flag=True, help="Disable progress bar")
@click.option("--excluded-pr-list", type=str, help="Comma-separated list of PRs to exclude from the issue.")
@click.option(
    "--github-token",
    envvar="GITHUB_TOKEN",
    help=textwrap.dedent(
        """
      GitHub token used to authenticate.
      You can set omit it if you have GITHUB_TOKEN env variable set.
      Can be generated with:
      https://github.com/settings/tokens/new?description=Read%20sssues&scopes=repo:status"""
    ),
)
@click.option(
    "--only-available-in-dist",
    is_flag=True,
    help="Only consider package ids with packages prepared in the dist folder",
)
@argument_provider_distributions
def generate_issue_content_providers(
    disable_progress: bool,
    excluded_pr_list: str,
    github_token: str,
    only_available_in_dist: bool,
    provider_distributions: list[str],
):
    """Generate (and optionally create) the "status of testing providers" GitHub issue.

    Collects the PRs included in each provider's latest changelog section, retrieves
    them (and their linked issues) from GitHub, renders the issue template and offers
    to create the issue via the ``gh`` CLI.
    """
    import jinja2
    from github import Github, Issue, PullRequest, UnknownObjectException

    class ProviderPRInfo(NamedTuple):
        provider_id: str
        pypi_package_name: str
        version: str
        pr_list: list[PullRequest.PullRequest | Issue.Issue]
        suffix: str

    if not provider_distributions:
        provider_distributions = list(DEPENDENCIES.keys())
    with ci_group("Generates GitHub issue content with people who can test it"):
        if excluded_pr_list:
            excluded_prs = [int(pr) for pr in excluded_pr_list.split(",")]
        else:
            excluded_prs = []
        commented_prs = get_commented_out_prs_from_provider_changelogs()
        # BUGFIX: this message was missing the f-prefix, so the placeholder was
        # printed verbatim instead of the count.
        get_console().print(
            f"[info]Automatically excluding {len(commented_prs)} PRs that are only commented out in changelog:"
        )
        excluded_prs.extend(commented_prs)
        all_prs: set[int] = set()
        all_retrieved_prs: set[int] = set()
        provider_prs: dict[str, list[int]] = {}
        files_in_dist = os.listdir(str(AIRFLOW_DIST_PATH)) if only_available_in_dist else []
        prepared_package_ids = []
        for provider_id in provider_distributions:
            if not only_available_in_dist or is_package_in_dist(files_in_dist, provider_id):
                get_console().print(f"Extracting PRs for provider {provider_id}")
                prepared_package_ids.append(provider_id)
            else:
                get_console().print(
                    f"Skipping extracting PRs for provider {provider_id} as it is missing in dist"
                )
                continue
            prs = get_prs_for_package(provider_id)
            if not prs:
                get_console().print(
                    f"[warning]Skipping provider {provider_id}. "
                    "The changelog file doesn't contain any PRs for the release.\n"
                )
                continue
            all_prs.update(prs)
            provider_prs[provider_id] = [pr for pr in prs if pr not in excluded_prs]
            all_retrieved_prs.update(provider_prs[provider_id])
        if not github_token:
            # Get GitHub token from gh CLI when not provided explicitly.
            gh_token_result = run_command(
                ["gh", "auth", "token"],
                capture_output=True,
                text=True,
                check=False,
            )
            if gh_token_result.returncode == 0:
                github_token = gh_token_result.stdout.strip()
        g = Github(github_token)
        repo = g.get_repo("apache/airflow")
        pull_requests: dict[int, PullRequest.PullRequest | Issue.Issue] = {}
        linked_issues: dict[int, list[Issue.Issue]] = {}
        all_prs_len = len(all_prs)
        all_retrieved_prs_len = len(all_retrieved_prs)
        get_console().print(
            f"[info] Found {all_prs_len} PRs in the providers. "
            f"Retrieving {all_retrieved_prs_len} (excluded {all_prs_len - all_retrieved_prs_len})"
        )
        get_console().print(f"Retrieved PRs: {all_retrieved_prs}")
        excluded_prs = sorted(set(all_prs) - set(all_retrieved_prs))
        get_console().print(f"Excluded PRs: {excluded_prs}")
        with Progress(console=get_console(), disable=disable_progress) as progress:
            task = progress.add_task(f"Retrieving {all_retrieved_prs_len} PRs ", total=all_retrieved_prs_len)
            for pr_number in all_retrieved_prs:
                progress.console.print(
                    f"Retrieving PR#{pr_number}: https://github.com/apache/airflow/pull/{pr_number}"
                )
                pr_or_issue = None
                try:
                    pr_or_issue = repo.get_pull(pr_number)
                    if pr_or_issue.user.login == "dependabot[bot]":
                        get_console().print(
                            f"[yellow]Skipping PR #{pr_number} as it was created by dependabot[/]"
                        )
                        continue
                    pull_requests[pr_number] = pr_or_issue
                except UnknownObjectException:
                    # Fallback to issue if PR not found
                    try:
                        pr_or_issue = repo.get_issue(pr_number)  # type: ignore[assignment]
                    except UnknownObjectException:
                        get_console().print(f"[red]The PR #{pr_number} could not be found[/]")
                    # BUGFIX: only store the result when it was actually retrieved -
                    # storing None here made the linked-issue lookup below raise
                    # AttributeError on .body.
                    if pr_or_issue is not None:
                        pull_requests[pr_number] = pr_or_issue  # type: ignore[assignment]
                # Retrieve issues linked from the PR/issue body (e.g. "closes #1234").
                if pr_number in pull_requests and pull_requests[pr_number].body:
                    body = " ".join(pull_requests[pr_number].body.splitlines())
                    linked_issue_numbers = {
                        int(issue_match.group(1)) for issue_match in ISSUE_MATCH_IN_BODY.finditer(body)
                    }
                    for linked_issue_number in linked_issue_numbers:
                        try:
                            _ = repo.get_issue(linked_issue_number)
                        except UnknownObjectException:
                            progress.console.print(
                                f"Failed to retrieve linked issue #{linked_issue_number}: is not an issue, "
                                f"likely a discussion is linked."
                            )
                        progress.console.print(
                            f"Retrieving Linked issue PR#{linked_issue_number}: "
                            f"https://github.com/apache/airflow/issues/{linked_issue_number}"
                        )
                        try:
                            if pr_number not in linked_issues:
                                linked_issues[pr_number] = []
                            linked_issues[pr_number].append(repo.get_issue(linked_issue_number))
                        except UnknownObjectException:
                            progress.console.print(
                                f"Failed to retrieve linked issue #{linked_issue_number}: Unknown Issue"
                            )
                progress.advance(task)
        get_provider_distributions_metadata.cache_clear()
        providers: dict[str, ProviderPRInfo] = {}
        for provider_id in prepared_package_ids:
            if provider_id not in provider_prs:
                continue
            pull_request_list = [pull_requests[pr] for pr in provider_prs[provider_id] if pr in pull_requests]
            provider_yaml_dict = get_provider_distributions_metadata().get(provider_id)
            if pull_request_list:
                if only_available_in_dist:
                    package_suffix = get_suffix_from_package_in_dist(files_in_dist, provider_id)
                else:
                    package_suffix = ""
                providers[provider_id] = ProviderPRInfo(
                    version=provider_yaml_dict["versions"][0],
                    provider_id=provider_id,
                    pypi_package_name=provider_yaml_dict["package-name"],
                    pr_list=pull_request_list,
                    suffix=package_suffix if package_suffix else "",
                )
        template = jinja2.Template(
            (Path(__file__).parents[1] / "provider_issue_TEMPLATE.md.jinja2").read_text()
        )
        issue_content = template.render(providers=providers, linked_issues=linked_issues, date=datetime.now())
        get_console().print()
        get_console().print(
            "[green]Below you can find the issue content that you can use "
            "to ask contributor to test providers![/]"
        )
        get_console().print()
        get_console().print()
        get_console().print(
            "Issue title: [warning]Status of testing Providers that were "
            f"prepared on {datetime.now():%B %d, %Y}[/]"
        )
        get_console().print()
        issue_content += "\n"
        users: set[str] = set()
        for provider_info in providers.values():
            for pr in provider_info.pr_list:
                if pr.user.login:
                    users.add("@" + pr.user.login)
        issue_content += f"All users involved in the PRs:\n{' '.join(users)}"
        syntax = Syntax(issue_content, "markdown", theme="ansi_dark")
        get_console().print(syntax)
        create_issue = user_confirm("Should I create the issue?")
        if create_issue == Answer.YES:
            res = run_command(
                [
                    "gh",
                    "issue",
                    "create",
                    "-t",
                    f"Status of testing Providers that were prepared on {datetime.now():%B %d, %Y}",
                    "-b",
                    issue_content,
                    "-l",
                    "testing status,kind:meta",
                    "-w",
                ],
                check=False,
            )
            if res.returncode != 0:
                get_console().print(
                    "Failed to create issue. If the error is about 'too long URL' you have "
                    "to create the issue manually by copy&pasting the above output"
                )
                sys.exit(1)
def get_git_log_command(
verbose: bool,
from_commit: str | None = None,
to_commit: str | None = None,
is_helm_chart: bool = True,
) -> list[str]:
git_cmd = [
"git",
"log",
"--pretty=format:%H %h %cd %s",
"--date=short",
]
if from_commit and to_commit:
git_cmd.append(f"{from_commit}...{to_commit}")
elif from_commit:
git_cmd.append(from_commit)
if is_helm_chart:
git_cmd.extend(["--", "chart/"])
else:
git_cmd.extend(["--", "."])
if verbose:
get_console().print(f"Command to run: '{' '.join(git_cmd)}'")
return git_cmd
|
DistributionPackageInfo
|
python
|
apache__airflow
|
providers/databricks/tests/unit/databricks/hooks/test_databricks.py
|
{
"start": 53276,
"end": 55667
}
|
class ____(TestDatabricksHookToken):
"""
Tests that `schema` and/or `port` get reflected in the requested API URLs.
"""
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id=DEFAULT_CONN_ID,
conn_type="databricks",
host=HOST,
login=None,
password=None,
extra=json.dumps({"token": TOKEN}),
schema="http",
port=7908,
)
)
self.hook = DatabricksHook()
@mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
def test_do_api_call_respects_schema(self, mock_requests):
mock_requests.get.return_value.json.return_value = {"foo": "bar"}
ret_val = self.hook._do_api_call(("GET", "2.1/foo/bar"))
assert ret_val == {"foo": "bar"}
mock_requests.get.assert_called_once()
assert mock_requests.get.call_args.args == (f"http://{HOST}:7908/api/2.1/foo/bar",)
@pytest.mark.asyncio
@mock.patch("airflow.providers.databricks.hooks.databricks_base.aiohttp.ClientSession.get")
async def test_async_do_api_call_respects_schema(self, mock_get):
mock_get.return_value.__aenter__.return_value.json = AsyncMock(return_value={"bar": "baz"})
async with self.hook:
run_page_url = await self.hook._a_do_api_call(("GET", "2.1/foo/bar"))
assert run_page_url == {"bar": "baz"}
mock_get.assert_called_once()
assert mock_get.call_args.args == (f"http://{HOST}:7908/api/2.1/foo/bar",)
@pytest.mark.asyncio
@mock.patch("airflow.providers.databricks.hooks.databricks_base.aiohttp.ClientSession.get")
async def test_async_do_api_call_only_existing_response_properties_are_read(self, mock_get):
response = mock_get.return_value.__aenter__.return_value
response.mock_add_spec(aiohttp.ClientResponse, spec_set=True)
response.json = AsyncMock(return_value={"bar": "baz"})
async with self.hook:
run_page_url = await self.hook._a_do_api_call(("GET", "2.1/foo/bar"))
assert run_page_url == {"bar": "baz"}
mock_get.assert_called_once()
assert mock_get.call_args.args == (f"http://{HOST}:7908/api/2.1/foo/bar",)
|
TestDatabricksHookConnSettings
|
python
|
celery__celery
|
t/unit/worker/test_bootsteps.py
|
{
"start": 85,
"end": 1329
}
|
class ____:
def test_get_prefix(self):
f = bootsteps.StepFormatter()
s = Mock()
s.last = True
assert f._get_prefix(s) == f.blueprint_prefix
s2 = Mock()
s2.last = False
s2.conditional = True
assert f._get_prefix(s2) == f.conditional_prefix
s3 = Mock()
s3.last = s3.conditional = False
assert f._get_prefix(s3) == ''
def test_node(self):
f = bootsteps.StepFormatter()
f.draw_node = Mock()
step = Mock()
step.last = False
f.node(step, x=3)
f.draw_node.assert_called_with(step, f.node_scheme, {'x': 3})
step.last = True
f.node(step, x=3)
f.draw_node.assert_called_with(step, f.blueprint_scheme, {'x': 3})
def test_edge(self):
f = bootsteps.StepFormatter()
f.draw_edge = Mock()
a, b = Mock(), Mock()
a.last = True
f.edge(a, b, x=6)
f.draw_edge.assert_called_with(a, b, f.edge_scheme, {
'x': 6, 'arrowhead': 'none', 'color': 'darkseagreen3',
})
a.last = False
f.edge(a, b, x=6)
f.draw_edge.assert_called_with(a, b, f.edge_scheme, {
'x': 6,
})
|
test_StepFormatter
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/notifications/ses.py
|
{
"start": 1173,
"end": 5109
}
|
class ____(BaseNotifier):
"""
Amazon Simple Email Service (SES) Notifier.
:param mail_from: Email address to set as email's from
:param to: List of email addresses to set as email's to
:param subject: Email's subject
:param html_content: Content of email in HTML format
:param files: List of paths of files to be attached
:param cc: List of email addresses to set as email's CC
:param bcc: List of email addresses to set as email's BCC
:param mime_subtype: Can be used to specify the subtype of the message. Default = mixed
:param mime_charset: Email's charset. Default = UTF-8.
:param return_path: The email address to which replies will be sent. By default, replies
are sent to the original sender's email address.
:param reply_to: The email address to which message bounces and complaints should be sent.
"Return-Path" is sometimes called "envelope from", "envelope sender", or "MAIL FROM".
:param custom_headers: Additional headers to add to the MIME message.
No validations are run on these values, and they should be able to be encoded.
"""
template_fields: Sequence[str] = (
"aws_conn_id",
"region_name",
"mail_from",
"to",
"subject",
"html_content",
"files",
"cc",
"bcc",
"mime_subtype",
"mime_charset",
"reply_to",
"return_path",
"custom_headers",
)
def __init__(
self,
*,
aws_conn_id: str | None = SesHook.default_conn_name,
region_name: str | None = None,
mail_from: str,
to: str | Iterable[str],
subject: str,
html_content: str,
files: list[str] | None = None,
cc: str | Iterable[str] | None = None,
bcc: str | Iterable[str] | None = None,
mime_subtype: str = "mixed",
mime_charset: str = "utf-8",
reply_to: str | None = None,
return_path: str | None = None,
custom_headers: dict[str, Any] | None = None,
**kwargs,
):
if AIRFLOW_V_3_1_PLUS:
# Support for passing context was added in 3.1.0
super().__init__(**kwargs)
else:
super().__init__()
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.mail_from = mail_from
self.to = to
self.subject = subject
self.html_content = html_content
self.files = files
self.cc = cc
self.bcc = bcc
self.mime_subtype = mime_subtype
self.mime_charset = mime_charset
self.reply_to = reply_to
self.return_path = return_path
self.custom_headers = custom_headers
def _build_send_kwargs(self):
return prune_dict(
{
"mail_from": self.mail_from,
"to": self.to,
"subject": self.subject,
"html_content": self.html_content,
"files": self.files,
"cc": self.cc,
"bcc": self.bcc,
"mime_subtype": self.mime_subtype,
"mime_charset": self.mime_charset,
"reply_to": self.reply_to,
"return_path": self.return_path,
"custom_headers": self.custom_headers,
}
)
@cached_property
def hook(self) -> SesHook:
"""Amazon Simple Email Service (SES) Hook (cached)."""
return SesHook(aws_conn_id=self.aws_conn_id, region_name=self.region_name)
def notify(self, context):
"""Send email using Amazon Simple Email Service (SES)."""
self.hook.send_email(**self._build_send_kwargs())
async def async_notify(self, context):
"""Send email using Amazon Simple Email Service (SES) (async)."""
await self.hook.asend_email(**self._build_send_kwargs())
send_ses_notification = SesNotifier
|
SesNotifier
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/exceptions.py
|
{
"start": 1334,
"end": 1747
}
|
class ____(Exception):
"""Raise when ECS cannot handle the request."""
def __init__(self, failures: list, message: str):
self.failures = failures
self.message = message
super().__init__(message)
def __reduce__(self):
"""Return EcsOperator state and a tuple of failures list and message."""
return EcsOperatorError, (self.failures, self.message)
|
EcsOperatorError
|
python
|
wandb__wandb
|
wandb/errors/links.py
|
{
"start": 440,
"end": 2520
}
|
class ____:
"""A collection of URLs that can be associated with a name."""
def __init__(self) -> None:
self.urls: dict[str, WBURL] = {
"wandb-launch": WBURL(
"https://wandb.me/launch",
"Link to the W&B launch marketing page",
),
"wandb-init": WBURL(
"https://wandb.me/wandb-init",
"Link to the wandb.init reference documentation page",
),
"define-metric": WBURL(
"https://wandb.me/define-metric",
"Link to the W&B developer guide documentation page on wandb.define_metric",
),
"developer-guide": WBURL(
"https://wandb.me/developer-guide",
"Link to the W&B developer guide top level page",
),
"wandb-core": WBURL(
"https://wandb.me/wandb-core",
"Link to the documentation for the wandb-core service",
),
"wandb-server": WBURL(
"https://wandb.me/wandb-server",
"Link to the documentation for the self-hosted W&B server",
),
"multiprocess": WBURL(
"https://wandb.me/multiprocess",
(
"Link to the W&B developer guide documentation page on how to "
"use wandb in a multiprocess environment"
),
),
}
def url(self, name: str) -> str:
"""Get the URL associated with the given name."""
wb_url = self.urls.get(name)
if wb_url:
return wb_url.url
raise ValueError(f"URL not found for {name}")
def description(self, name: str) -> str:
"""Get the description associated with the given name."""
wb_url = self.urls.get(name)
if wb_url:
return wb_url.description
raise ValueError(f"Description not found for {name}")
# This is an instance of the Links class that can be used to access the URLs
url_registry = Registry()
|
Registry
|
python
|
davidhalter__jedi
|
jedi/inference/context.py
|
{
"start": 11809,
"end": 12227
}
|
class ____(TreeContextMixin, ValueContext):
def get_filters(self, until_position=None, origin_scope=None):
return self._value.get_filters()
def get_value(self):
return self._value
@property
def string_names(self):
return self._value.string_names
def py__file__(self) -> Optional[Path]:
return self._value.py__file__() # type: ignore[no-any-return]
|
NamespaceContext
|
python
|
pytorch__pytorch
|
test/inductor/test_kernel_optimization.py
|
{
"start": 892,
"end": 3515
}
|
class ____(TestCase):
def compare_dict_tensors(self, ref_dict, res_dict, rtol=1e-3, atol=1e-3):
if len(set(ref_dict.keys())) != len(set(res_dict.keys())):
return False
for key1 in ref_dict:
key2 = "_orig_mod." + key1
assert key2 in res_dict, f"{key1} does not exist in traced module"
if not torch.allclose(ref_dict[key1], res_dict[key2], rtol=rtol, atol=atol):
return False
return True
def compare_pred(self, module, traced, input, rtol=1e-3, atol=1e-3):
ref = module(*input)
res = traced(*input)
self.assertEqual(ref, res, rtol=rtol, atol=atol)
def compare_parameters(self, module, traced, rtol=1e-3, atol=1e-3):
ref_params = dict(module.named_parameters())
res_params = dict(traced.named_parameters())
self.assertTrue(self.compare_dict_tensors(ref_params, res_params, rtol, atol))
def compare_gradients(self, module, traced, rtol=1e-3, atol=1e-3):
ref_grad = {key: param.grad for key, param in module.named_parameters()}
res_grad = {key: param.grad for key, param in traced.named_parameters()}
self.assertTrue(
self.compare_dict_tensors(ref_grad, res_grad, rtol=rtol, atol=atol)
)
@requires_gpu()
@torch._inductor.config.patch(
pre_grad_fusion_options={
"einsum_to_pointwise_pass": {},
},
post_grad_fusion_options={},
)
@serialTest() # Needs slightly more memory on GPUs
def test_einsum_to_pointwise(self):
counters.clear()
module = TestEinsumtoPointwise().to(GPU_TYPE)
input = [
torch.randn(4096, 9, 512, device=GPU_TYPE, requires_grad=True),
torch.randn(9, 512, 96, device=GPU_TYPE, requires_grad=True),
torch.randn(9, 96, device=GPU_TYPE, requires_grad=True),
torch.randn(4096, 9, 160, device=GPU_TYPE, requires_grad=True),
torch.randn(4096, 9, 160, 96, device=GPU_TYPE, requires_grad=True),
torch.randn(4096, 9, 96, device=GPU_TYPE, requires_grad=True),
]
traced = torch.compile(module)
ref = module(*input)
res = traced(*input)
ref.sum().backward()
res.sum().backward()
self.compare_pred(module, traced, input)
self.compare_parameters(module, traced)
self.compare_gradients(module, traced)
self.assertEqual(
counters["inductor"]["einsum_to_pointwise_pass"],
1,
)
counters.clear()
if __name__ == "__main__":
run_tests()
|
TestKernelOptimization
|
python
|
getsentry__sentry
|
tests/sentry/uptime/subscriptions/test_subscriptions.py
|
{
"start": 35709,
"end": 39640
}
|
class ____(UptimeTestCase):
@mock.patch("sentry.quotas.backend.disable_seat")
def test(self, mock_disable_seat: mock.MagicMock) -> None:
detector = create_uptime_detector(
self.project,
self.environment,
url="https://sentry.io",
interval_seconds=3600,
timeout_ms=1000,
mode=UptimeMonitorMode.MANUAL,
)
uptime_subscription = get_uptime_subscription(detector)
with self.tasks():
disable_uptime_detector(detector)
uptime_subscription.refresh_from_db()
assert uptime_subscription.status == UptimeSubscription.Status.DISABLED.value
mock_disable_seat.assert_called_with(DataCategory.UPTIME, detector)
detector.refresh_from_db()
assert not detector.enabled
@mock.patch("sentry.quotas.backend.disable_seat")
@mock.patch("sentry.uptime.subscriptions.subscriptions.resolve_uptime_issue")
def test_disable_failed(self, mock_resolve_uptime_issue, mock_disable_seat) -> None:
with (
self.tasks(),
self.feature(UptimeDomainCheckFailure.build_ingest_feature_name()),
):
detector = create_uptime_detector(
self.project,
self.environment,
url="https://sentry.io",
interval_seconds=3600,
timeout_ms=1000,
mode=UptimeMonitorMode.MANUAL,
)
uptime_subscription = get_uptime_subscription(detector)
# Set detector state to HIGH to simulate a failed state
self.create_detector_state(
detector=detector,
detector_group_key=None,
state=DetectorPriorityLevel.HIGH,
is_triggered=True,
)
disable_uptime_detector(detector)
mock_resolve_uptime_issue.assert_called_with(detector)
detector.refresh_from_db()
uptime_subscription.refresh_from_db()
# After disabling, the detector state should be OK and not triggered (we reset it)
detector_state = detector.detectorstate_set.first()
assert detector_state is not None
assert not detector_state.is_triggered
assert detector_state.priority_level == DetectorPriorityLevel.OK
assert uptime_subscription.status == UptimeSubscription.Status.DISABLED.value
mock_disable_seat.assert_called_with(DataCategory.UPTIME, detector)
detector.refresh_from_db()
assert not detector.enabled
@mock.patch("sentry.quotas.backend.disable_seat")
def test_already_disabled(self, mock_disable_seat: mock.MagicMock) -> None:
detector = create_uptime_detector(
self.project,
self.environment,
url="https://sentry.io",
interval_seconds=3600,
timeout_ms=1000,
mode=UptimeMonitorMode.MANUAL,
)
# Manually disable the detector first
detector.update(enabled=False)
disable_uptime_detector(detector)
mock_disable_seat.assert_not_called()
@mock.patch("sentry.quotas.backend.disable_seat")
def test_skip_quotas(self, mock_disable_seat: mock.MagicMock) -> None:
detector = create_uptime_detector(
self.project,
self.environment,
url="https://sentry.io",
interval_seconds=3600,
timeout_ms=1000,
mode=UptimeMonitorMode.MANUAL,
)
uptime_subscription = get_uptime_subscription(detector)
with self.tasks():
disable_uptime_detector(detector, skip_quotas=True)
uptime_subscription.refresh_from_db()
assert uptime_subscription.status == UptimeSubscription.Status.DISABLED.value
mock_disable_seat.assert_not_called()
detector.refresh_from_db()
assert not detector.enabled
|
DisableUptimeDetectorTest
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/projects.py
|
{
"start": 68105,
"end": 78494
}
|
class ____(Response):
"""
Response of projects.get_all endpoint.
:param projects: Projects list
:type projects: Sequence[ProjectsGetAllResponseSingle]
:param scroll_id: Scroll ID that can be used with the next calls to get_all_ex to retrieve more data
:type scroll_id: str
"""
_service = "projects"
_action = "get_all"
_version = "2.20"
_schema = {
"definitions": {
"projects_get_all_response_single": {
"properties": {
"basename": {
"description": "Project base name",
"type": ["string", "null"],
},
"company": {
"description": "Company id",
"type": ["string", "null"],
},
"created": {
"description": "Creation time",
"format": "date-time",
"type": ["string", "null"],
},
"dataset_stats": {
"description": "Project dataset statistics",
"properties": {
"file_count": {
"description": "The number of files stored in the dataset",
"type": "integer",
},
"total_size": {
"description": "The total dataset size in bytes",
"type": "integer",
},
},
"type": ["object", "null"],
},
"default_output_destination": {
"description": "The default output destination URL for new tasks under this project",
"type": ["string", "null"],
},
"description": {
"description": "Project description",
"type": ["string", "null"],
},
"id": {"description": "Project id", "type": ["string", "null"]},
"last_update": {
"description": "Last update time",
"format": "date-time",
"type": ["string", "null"],
},
"name": {"description": "Project name", "type": ["string", "null"]},
"own_models": {
"description": "The amount of models under this project (without children projects). Returned if 'check_own_contents' flag is set in the request",
"type": ["integer", "null"],
},
"own_tasks": {
"description": "The amount of tasks under this project (without children projects). Returned if 'check_own_contents' flag is set in the request",
"type": ["integer", "null"],
},
"stats": {
"description": "Additional project stats",
"oneOf": [{"$ref": "#/definitions/stats"}, {"type": "null"}],
},
"sub_projects": {
"description": "The list of sub projects",
"items": {
"properties": {
"id": {
"description": "Subproject ID",
"type": "string",
},
"name": {
"description": "Subproject name",
"type": "string",
},
},
"type": "object",
},
"type": ["array", "null"],
},
"system_tags": {
"description": "System tags. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"user": {
"description": "Associated user id",
"type": ["string", "null"],
},
},
"type": "object",
},
"stats": {
"properties": {
"active": {
"description": "Stats for active tasks",
"oneOf": [
{"$ref": "#/definitions/stats_status_count"},
{"type": "null"},
],
},
"archived": {
"description": "Stats for archived tasks",
"oneOf": [
{"$ref": "#/definitions/stats_status_count"},
{"type": "null"},
],
},
},
"type": "object",
},
"stats_status_count": {
"properties": {
"completed_tasks_24h": {
"description": "Number of tasks completed in the last 24 hours",
"type": ["integer", "null"],
},
"last_task_run": {
"description": "The most recent started time of a task",
"type": ["integer", "null"],
},
"status_count": {
"description": "Status counts",
"properties": {
"closed": {
"description": "Number of 'closed' tasks in project",
"type": "integer",
},
"completed": {
"description": "Number of 'completed' tasks in project",
"type": "integer",
},
"created": {
"description": "Number of 'created' tasks in project",
"type": "integer",
},
"failed": {
"description": "Number of 'failed' tasks in project",
"type": "integer",
},
"in_progress": {
"description": "Number of 'in_progress' tasks in project",
"type": "integer",
},
"published": {
"description": "Number of 'published' tasks in project",
"type": "integer",
},
"queued": {
"description": "Number of 'queued' tasks in project",
"type": "integer",
},
"stopped": {
"description": "Number of 'stopped' tasks in project",
"type": "integer",
},
"unknown": {
"description": "Number of 'unknown' tasks in project",
"type": "integer",
},
},
"type": ["object", "null"],
},
"total_runtime": {
"description": "Total run time of all tasks in project (in seconds)",
"type": ["integer", "null"],
},
"total_tasks": {
"description": "Number of tasks",
"type": ["integer", "null"],
},
},
"type": "object",
},
},
"properties": {
"projects": {
"description": "Projects list",
"items": {"$ref": "#/definitions/projects_get_all_response_single"},
"type": ["array", "null"],
},
"scroll_id": {
"description": "Scroll ID that can be used with the next calls to get_all_ex to retrieve more data",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(self, projects: Optional[List[Any]] = None, scroll_id: Optional[str] = None, **kwargs: Any) -> None:
super(GetAllResponse, self).__init__(**kwargs)
self.projects = projects
self.scroll_id = scroll_id
@schema_property("projects")
def projects(self) -> Optional[List[Any]]:
return self._property_projects
@projects.setter
def projects(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_projects = None
return
self.assert_isinstance(value, "projects", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [ProjectsGetAllResponseSingle.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "projects", ProjectsGetAllResponseSingle, is_array=True)
self._property_projects = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
|
GetAllResponse
|
python
|
keras-team__keras
|
keras/src/layers/preprocessing/image_preprocessing/random_flip.py
|
{
"start": 693,
"end": 8057
}
|
class ____(BaseImagePreprocessingLayer):
"""A preprocessing layer which randomly flips images during training.
This layer will flip the images horizontally and or vertically based on the
`mode` attribute. During inference time, the output will be identical to
input. Call the layer with `training=True` to flip the input.
Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and
of integer or floating point dtype.
By default, the layer will output floats.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Input shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Output shape:
3D (unbatched) or 4D (batched) tensor with shape:
`(..., height, width, channels)`, in `"channels_last"` format.
Args:
mode: String indicating which flip mode to use. Can be `"horizontal"`,
`"vertical"`, or `"horizontal_and_vertical"`. `"horizontal"` is a
left-right flip and `"vertical"` is a top-bottom flip. Defaults to
`"horizontal_and_vertical"`
seed: Integer. Used to create a random seed.
**kwargs: Base layer keyword arguments, such as
`name` and `dtype`.
"""
_USE_BASE_FACTOR = False
def __init__(
self,
mode=HORIZONTAL_AND_VERTICAL,
seed=None,
data_format=None,
**kwargs,
):
super().__init__(data_format=data_format, **kwargs)
self.seed = seed
self.generator = SeedGenerator(seed)
self.mode = mode
self._convert_input_args = False
self._allow_non_tensor_positional_args = True
def get_random_transformation(self, data, training=True, seed=None):
if not training:
return None
if isinstance(data, dict):
images = data["images"]
else:
images = data
shape = self.backend.core.shape(images)
if len(shape) == 3:
flips_shape = (1, 1, 1)
else:
flips_shape = (shape[0], 1, 1, 1)
if seed is None:
seed = self._get_seed_generator(self.backend._backend)
flips = self.backend.numpy.less_equal(
self.backend.random.uniform(shape=flips_shape, seed=seed), 0.5
)
return {"flips": flips, "input_shape": shape}
def transform_images(self, images, transformation, training=True):
images = self.backend.cast(images, self.compute_dtype)
if training:
return self._flip_inputs(images, transformation)
return images
def transform_labels(self, labels, transformation, training=True):
return labels
def transform_bounding_boxes(
self,
bounding_boxes,
transformation,
training=True,
):
def _flip_boxes_horizontal(boxes):
x1, x2, x3, x4 = self.backend.numpy.split(boxes, 4, axis=-1)
outputs = self.backend.numpy.concatenate(
[1 - x3, x2, 1 - x1, x4], axis=-1
)
return outputs
def _flip_boxes_vertical(boxes):
x1, x2, x3, x4 = self.backend.numpy.split(boxes, 4, axis=-1)
outputs = self.backend.numpy.concatenate(
[x1, 1 - x4, x3, 1 - x2], axis=-1
)
return outputs
def _transform_xyxy(boxes, box_flips):
bboxes = boxes["boxes"]
if self.mode in {HORIZONTAL, HORIZONTAL_AND_VERTICAL}:
bboxes = self.backend.numpy.where(
box_flips,
_flip_boxes_horizontal(bboxes),
bboxes,
)
if self.mode in {VERTICAL, HORIZONTAL_AND_VERTICAL}:
bboxes = self.backend.numpy.where(
box_flips,
_flip_boxes_vertical(bboxes),
bboxes,
)
return bboxes
if training:
if backend_utils.in_tf_graph():
self.backend.set_backend("tensorflow")
flips = self.backend.numpy.squeeze(transformation["flips"], axis=-1)
if self.data_format == "channels_first":
height_axis = -2
width_axis = -1
else:
height_axis = -3
width_axis = -2
input_height, input_width = (
transformation["input_shape"][height_axis],
transformation["input_shape"][width_axis],
)
bounding_boxes = convert_format(
bounding_boxes,
source=self.bounding_box_format,
target="rel_xyxy",
height=input_height,
width=input_width,
)
bounding_boxes["boxes"] = _transform_xyxy(bounding_boxes, flips)
bounding_boxes = clip_to_image_size(
bounding_boxes=bounding_boxes,
height=input_height,
width=input_width,
bounding_box_format="xyxy",
)
bounding_boxes = convert_format(
bounding_boxes,
source="rel_xyxy",
target=self.bounding_box_format,
height=input_height,
width=input_width,
)
self.backend.reset()
return bounding_boxes
def transform_segmentation_masks(
self, segmentation_masks, transformation, training=True
):
return self.transform_images(
segmentation_masks, transformation, training=training
)
def _flip_inputs(self, inputs, transformation):
if transformation is None:
return inputs
flips = transformation["flips"]
inputs_shape = self.backend.shape(inputs)
unbatched = len(inputs_shape) == 3
if unbatched:
inputs = self.backend.numpy.expand_dims(inputs, axis=0)
flipped_outputs = inputs
if self.data_format == "channels_last":
horizontal_axis = -2
vertical_axis = -3
else:
horizontal_axis = -1
vertical_axis = -2
if self.mode == HORIZONTAL or self.mode == HORIZONTAL_AND_VERTICAL:
flipped_outputs = self.backend.numpy.where(
flips,
self.backend.numpy.flip(flipped_outputs, axis=horizontal_axis),
flipped_outputs,
)
if self.mode == VERTICAL or self.mode == HORIZONTAL_AND_VERTICAL:
flipped_outputs = self.backend.numpy.where(
flips,
self.backend.numpy.flip(flipped_outputs, axis=vertical_axis),
flipped_outputs,
)
if unbatched:
flipped_outputs = self.backend.numpy.squeeze(
flipped_outputs, axis=0
)
return flipped_outputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update(
{
"seed": self.seed,
"mode": self.mode,
"data_format": self.data_format,
}
)
return config
|
RandomFlip
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/visitors.py
|
{
"start": 1552,
"end": 4017
}
|
class ____:
"""Base class for visitable objects.
:class:`.Visitable` is used to implement the SQL compiler dispatch
functions. Other forms of traversal such as for cache key generation
are implemented separately using the :class:`.HasTraverseInternals`
interface.
.. versionchanged:: 2.0 The :class:`.Visitable` class was named
:class:`.Traversible` in the 1.4 series; the name is changed back
to :class:`.Visitable` in 2.0 which is what it was prior to 1.4.
Both names remain importable in both 1.4 and 2.0 versions.
"""
__slots__ = ()
__visit_name__: str
_original_compiler_dispatch: _CompilerDispatchType
if typing.TYPE_CHECKING:
def _compiler_dispatch(self, visitor: Any, **kw: Any) -> str: ...
def __init_subclass__(cls) -> None:
if "__visit_name__" in cls.__dict__:
cls._generate_compiler_dispatch()
super().__init_subclass__()
@classmethod
def _generate_compiler_dispatch(cls) -> None:
visit_name = cls.__visit_name__
if "_compiler_dispatch" in cls.__dict__:
# class has a fixed _compiler_dispatch() method.
# copy it to "original" so that we can get it back if
# sqlalchemy.ext.compiles overrides it.
cls._original_compiler_dispatch = cls._compiler_dispatch
return
if not isinstance(visit_name, str):
raise exc.InvalidRequestError(
f"__visit_name__ on class {cls.__name__} must be a string "
"at the class level"
)
name = "visit_%s" % visit_name
getter = operator.attrgetter(name)
def _compiler_dispatch(
self: Visitable, visitor: Any, **kw: Any
) -> str:
"""Look for an attribute named "visit_<visit_name>" on the
visitor, and call it with the same kw params.
"""
try:
meth = getter(visitor)
except AttributeError as err:
return visitor.visit_unsupported_compilation(self, err, **kw) # type: ignore # noqa: E501
else:
return meth(self, **kw) # type: ignore # noqa: E501
cls._compiler_dispatch = ( # type: ignore
cls._original_compiler_dispatch
) = _compiler_dispatch
def __class_getitem__(cls, key: Any) -> Any:
# allow generic classes in py3.9+
return cls
|
Visitable
|
python
|
jazzband__django-pipeline
|
tests/tests/test_conf.py
|
{
"start": 131,
"end": 1520
}
|
class ____(TestCase):
def test_3unicode(self):
s = PipelineSettings({"FOO_BINARY": "env actualprogram"})
self.assertEqual(s.FOO_BINARY, ("env", "actualprogram"))
def test_2unicode(self):
s = PipelineSettings({"FOO_BINARY": "env actualprogram"})
self.assertEqual(s.FOO_BINARY, ("env", "actualprogram"))
def test_2bytes(self):
s = PipelineSettings({"FOO_BINARY": "env actualprogram"})
self.assertEqual(s.FOO_BINARY, ("env", "actualprogram"))
def test_expected_splitting(self):
s = PipelineSettings({"FOO_BINARY": "env actualprogram"})
self.assertEqual(s.FOO_BINARY, ("env", "actualprogram"))
@skipIf(sys.platform.startswith("win"), "requires posix platform")
def test_expected_preservation(self):
s = PipelineSettings({"FOO_BINARY": r"actual\ program"})
self.assertEqual(s.FOO_BINARY, ("actual program",))
@skipUnless(sys.platform.startswith("win"), "requires windows")
def test_win_path_preservation(self):
s = PipelineSettings({"FOO_BINARY": "C:\\Test\\ActualProgram.exe argument"})
self.assertEqual(s.FOO_BINARY, ("C:\\Test\\ActualProgram.exe", "argument"))
def test_tuples_are_normal(self):
s = PipelineSettings({"FOO_ARGUMENTS": ("explicit", "with", "args")})
self.assertEqual(s.FOO_ARGUMENTS, ("explicit", "with", "args"))
|
TestSettings
|
python
|
PyCQA__pylint
|
tests/functional/a/arguments.py
|
{
"start": 2769,
"end": 3101
}
|
class ____:
@staticmethod
def test(first, second=None, **kwargs):
return first, second, kwargs
def func(self):
self.test(42)
self.test(42, second=34)
self.test(42, 42)
self.test() # [no-value-for-parameter]
self.test(42, 42, 42) # [too-many-function-args]
|
TestStaticMethod
|
python
|
getsentry__sentry
|
tests/sentry/snuba/test_tasks.py
|
{
"start": 2156,
"end": 4527
}
|
class ____(TestCase, metaclass=abc.ABCMeta):
__test__ = Abstract(__module__, __qualname__)
status_translations = {
QuerySubscription.Status.CREATING: "create",
QuerySubscription.Status.UPDATING: "update",
QuerySubscription.Status.DELETING: "delete",
}
@pytest.fixture(autouse=True)
def _setup_metrics(self):
with patch("sentry.snuba.tasks.metrics") as self.metrics:
yield
@abc.abstractproperty
def expected_status(self):
pass
@abc.abstractmethod
def task(self, subscription_id: int) -> Any:
pass
def create_subscription(
self,
status=None,
subscription_id=None,
dataset=None,
query=None,
aggregate=None,
time_window=None,
query_extra=None,
):
if status is None:
status = self.expected_status
if dataset is None:
dataset = Dataset.Events
if aggregate is None:
aggregate = "count_unique(tags[sentry:user])"
if query is None:
query = "hello"
if time_window is None:
time_window = 60
resolution = 60
snuba_query = SnubaQuery.objects.create(
type=query_datasets_to_type[dataset].value,
dataset=dataset.value,
aggregate=aggregate,
query=query,
time_window=time_window,
resolution=resolution,
)
return QuerySubscription.objects.create(
snuba_query=snuba_query,
status=status.value,
subscription_id=subscription_id,
project=self.project,
type="something",
query_extra=query_extra,
)
def test_no_subscription(self) -> None:
self.task(12345)
self.metrics.incr.assert_called_once_with(
"snuba.subscriptions.{}.subscription_does_not_exist".format(
self.status_translations[self.expected_status]
)
)
def test_invalid_status(self) -> None:
sub = self.create_subscription(QuerySubscription.Status.ACTIVE)
self.task(sub.id)
self.metrics.incr.assert_called_once_with(
"snuba.subscriptions.{}.incorrect_status".format(
self.status_translations[self.expected_status]
)
)
|
BaseSnubaTaskTest
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/hooks/looker.py
|
{
"start": 1367,
"end": 7418
}
|
class ____(BaseHook):
"""Hook for Looker APIs."""
conn_name_attr = "looker_conn_id"
default_conn_name = "looker_default"
conn_type = "gcp_looker"
hook_name = "Google Looker"
def __init__(
self,
looker_conn_id: str,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.looker_conn_id = looker_conn_id
# source is used to track origin of the requests
self.source = f"airflow:{version}"
def start_pdt_build(
self,
model: str,
view: str,
query_params: dict | None = None,
):
"""
Submit a PDT materialization job to Looker.
:param model: Required. The model of the PDT to start building.
:param view: Required. The view of the PDT to start building.
:param query_params: Optional. Additional materialization parameters.
"""
self.log.info("Submitting PDT materialization job. Model: '%s', view: '%s'.", model, view)
self.log.debug("PDT materialization job source: '%s'.", self.source)
sdk = self.get_looker_sdk()
looker_ver = sdk.versions().looker_release_version
if parse_version(looker_ver) < parse_version("22.2.0"):
raise AirflowException(f"This API requires Looker version 22.2+. Found: {looker_ver}.")
# unpack query_params dict into kwargs (if not None)
if query_params:
resp = sdk.start_pdt_build(model_name=model, view_name=view, source=self.source, **query_params)
else:
resp = sdk.start_pdt_build(model_name=model, view_name=view, source=self.source)
self.log.info("Start PDT build response: '%s'.", resp)
return resp
def check_pdt_build(
self,
materialization_id: str,
):
"""
Get the PDT materialization job status from Looker.
:param materialization_id: Required. The materialization id to check status for.
"""
self.log.info("Requesting PDT materialization job status. Job id: %s.", materialization_id)
sdk = self.get_looker_sdk()
resp = sdk.check_pdt_build(materialization_id=materialization_id)
self.log.info("Check PDT build response: '%s'.", resp)
return resp
def pdt_build_status(
self,
materialization_id: str,
) -> dict:
"""
Get the PDT materialization job status.
:param materialization_id: Required. The materialization id to check status for.
"""
resp = self.check_pdt_build(materialization_id=materialization_id)
status_json = resp["resp_text"]
status_dict = json.loads(status_json)
self.log.info(
"PDT materialization job id: %s. Status: '%s'.", materialization_id, status_dict["status"]
)
return status_dict
def stop_pdt_build(
self,
materialization_id: str,
):
"""
Start a PDT materialization job cancellation request.
:param materialization_id: Required. The materialization id to stop.
"""
self.log.info("Stopping PDT materialization. Job id: %s.", materialization_id)
self.log.debug("PDT materialization job source: '%s'.", self.source)
sdk = self.get_looker_sdk()
resp = sdk.stop_pdt_build(materialization_id=materialization_id, source=self.source)
self.log.info("Stop PDT build response: '%s'.", resp)
return resp
def wait_for_job(
self,
materialization_id: str,
wait_time: int = 10,
timeout: int | None = None,
) -> None:
"""
Poll a PDT materialization job to check if it finishes.
:param materialization_id: Required. The materialization id to wait for.
:param wait_time: Optional. Number of seconds between checks.
:param timeout: Optional. How many seconds wait for job to be ready.
Used only if ``asynchronous`` is False.
"""
self.log.info("Waiting for PDT materialization job to complete. Job id: %s.", materialization_id)
status = None
start = time.monotonic()
while status not in (
JobStatus.DONE.value,
JobStatus.ERROR.value,
JobStatus.CANCELLED.value,
JobStatus.UNKNOWN.value,
):
if timeout and start + timeout < time.monotonic():
self.stop_pdt_build(materialization_id=materialization_id)
raise AirflowException(
f"Timeout: PDT materialization job is not ready after {timeout}s. "
f"Job id: {materialization_id}."
)
time.sleep(wait_time)
status_dict = self.pdt_build_status(materialization_id=materialization_id)
status = status_dict["status"]
if status == JobStatus.ERROR.value:
msg = status_dict["message"]
raise AirflowException(
f'PDT materialization job failed. Job id: {materialization_id}. Message:\n"{msg}"'
)
if status == JobStatus.CANCELLED.value:
raise AirflowException(f"PDT materialization job was cancelled. Job id: {materialization_id}.")
if status == JobStatus.UNKNOWN.value:
raise AirflowException(
f"PDT materialization job has unknown status. Job id: {materialization_id}."
)
self.log.info("PDT materialization job completed successfully. Job id: %s.", materialization_id)
def get_looker_sdk(self):
"""Return Looker SDK client for Looker API 4.0."""
conn = self.get_connection(self.looker_conn_id)
settings = LookerApiSettings(conn)
transport = requests_transport.RequestsTransport.configure(settings)
return methods40.Looker40SDK(
auth_session.AuthSession(settings, transport, serialize.deserialize40, "4.0"),
serialize.deserialize40,
serialize.serialize40,
transport,
"4.0",
)
|
LookerHook
|
python
|
scrapy__scrapy
|
scrapy/extensions/httpcache.py
|
{
"start": 949,
"end": 1792
}
|
class ____:
def __init__(self, settings: BaseSettings):
self.ignore_schemes: list[str] = settings.getlist("HTTPCACHE_IGNORE_SCHEMES")
self.ignore_http_codes: list[int] = [
int(x) for x in settings.getlist("HTTPCACHE_IGNORE_HTTP_CODES")
]
def should_cache_request(self, request: Request) -> bool:
return urlparse_cached(request).scheme not in self.ignore_schemes
def should_cache_response(self, response: Response, request: Request) -> bool:
return response.status not in self.ignore_http_codes
def is_cached_response_fresh(
self, cachedresponse: Response, request: Request
) -> bool:
return True
def is_cached_response_valid(
self, cachedresponse: Response, response: Response, request: Request
) -> bool:
return True
|
DummyPolicy
|
python
|
fastai__fastai
|
fastai/vision/augment.py
|
{
"start": 3913,
"end": 7151
}
|
class ____(RandTransform):
"Randomly flip with probability `p`"
def before_call(self, b, split_idx):
super().before_call(b, split_idx)
self.k = random.randint(0,7)
def encodes(self, x:(Image.Image,*TensorTypes)): return x.dihedral(self.k)
# %% ../../nbs/09_vision.augment.ipynb 27
from torchvision.transforms.functional import pad as tvpad
# %% ../../nbs/09_vision.augment.ipynb 28
mk_class('PadMode', **{o:o.lower() for o in ['Zeros', 'Border', 'Reflection']},
doc="All possible padding mode as attributes to get tab-completion and typo-proofing")
# %% ../../nbs/09_vision.augment.ipynb 29
_all_ = ['PadMode']
# %% ../../nbs/09_vision.augment.ipynb 31
_pad_modes = {'zeros': 'constant', 'border': 'edge', 'reflection': 'reflect'}
@patch
def _do_crop_pad(x:Image.Image, sz, tl, orig_sz,
pad_mode=PadMode.Zeros, resize_mode=BILINEAR, resize_to=None):
if any(tl.ge(0)) or any(tl.add(sz).le(orig_sz)):
# At least one dim is inside the image, so needs to be cropped
c = tl.max(0)
x = x.crop((*c, *tl.add(sz).min(orig_sz)))
if any(tl.lt(0)) or any(tl.add(sz).ge(orig_sz)):
# At least one dim is outside the image, so needs to be padded
p = (-tl).max(0)
f = (sz-orig_sz).add(tl).max(0)
x = tvpad(x, (*p, *f), padding_mode=_pad_modes[pad_mode])
if resize_to is not None: x = x.resize(resize_to, resize_mode)
return x
@patch
def _do_crop_pad(x:TensorPoint, sz, tl, orig_sz, pad_mode=PadMode.Zeros, resize_to=None, **kwargs):
#assert pad_mode==PadMode.Zeros,"Only zero padding is supported for `TensorPoint` and `TensorBBox`"
orig_sz,sz,tl = map(FloatTensor, (orig_sz,sz,tl))
return TensorPoint((x+1)*orig_sz/sz - tl*2/sz - 1, sz=sz if resize_to is None else resize_to)
@patch
def _do_crop_pad(x:TensorBBox, sz, tl, orig_sz, pad_mode=PadMode.Zeros, resize_to=None, **kwargs):
bbox = TensorPoint._do_crop_pad(x.view(-1,2), sz, tl, orig_sz, pad_mode, resize_to).view(-1,4)
return TensorBBox(bbox, img_size=x.img_size)
@patch
def crop_pad(x:TensorBBox|TensorPoint|Image.Image,
sz:int|tuple, # Crop/pad size of input, duplicated if one value is specified
tl:tuple=None, # Optional top-left coordinate of the crop/pad, if `None` center crop
orig_sz:tuple=None, # Original size of input
pad_mode:PadMode=PadMode.Zeros, # Fastai padding mode
resize_mode=BILINEAR, # Pillow `Image` resize mode
resize_to:tuple=None # Optional post crop/pad resize of input
):
if isinstance(sz,int): sz = (sz,sz)
orig_sz = fastuple(_get_sz(x) if orig_sz is None else orig_sz)
sz,tl = fastuple(sz),fastuple(((_get_sz(x)-sz)//2) if tl is None else tl)
return x._do_crop_pad(sz, tl, orig_sz=orig_sz, pad_mode=pad_mode, resize_mode=resize_mode, resize_to=resize_to)
# %% ../../nbs/09_vision.augment.ipynb 32
def _process_sz(size):
if isinstance(size,int): size=(size,size)
return fastuple(size[1],size[0])
def _get_sz(x):
if isinstance(x, tuple): x = x[0]
if not isinstance(x, Tensor): return fastuple(x.size)
return fastuple(getattr(x, 'img_size', getattr(x, 'sz', (x.shape[-1], x.shape[-2]))))
# %% ../../nbs/09_vision.augment.ipynb 33
@delegates()
|
DihedralItem
|
python
|
sympy__sympy
|
sympy/codegen/approximations.py
|
{
"start": 474,
"end": 3428
}
|
class ____(Optimization):
"""
Approximates sum by neglecting small terms.
Explanation
===========
If terms are expressions which can be determined to be monotonic, then
bounds for those expressions are added.
Parameters
==========
bounds : dict
Mapping expressions to length 2 tuple of bounds (low, high).
reltol : number
Threshold for when to ignore a term. Taken relative to the largest
lower bound among bounds.
Examples
========
>>> from sympy import exp
>>> from sympy.abc import x, y, z
>>> from sympy.codegen.rewriting import optimize
>>> from sympy.codegen.approximations import SumApprox
>>> bounds = {x: (-1, 1), y: (1000, 2000), z: (-10, 3)}
>>> sum_approx3 = SumApprox(bounds, reltol=1e-3)
>>> sum_approx2 = SumApprox(bounds, reltol=1e-2)
>>> sum_approx1 = SumApprox(bounds, reltol=1e-1)
>>> expr = 3*(x + y + exp(z))
>>> optimize(expr, [sum_approx3])
3*(x + y + exp(z))
>>> optimize(expr, [sum_approx2])
3*y + 3*exp(z)
>>> optimize(expr, [sum_approx1])
3*y
"""
def __init__(self, bounds, reltol, **kwargs):
super().__init__(**kwargs)
self.bounds = bounds
self.reltol = reltol
def __call__(self, expr):
return expr.factor().replace(self.query, lambda arg: self.value(arg))
def query(self, expr):
return expr.is_Add
def value(self, add):
for term in add.args:
if term.is_number or term in self.bounds or len(term.free_symbols) != 1:
continue
fs, = term.free_symbols
if fs not in self.bounds:
continue
intrvl = Interval(*self.bounds[fs])
if is_increasing(term, intrvl, fs):
self.bounds[term] = (
term.subs({fs: self.bounds[fs][0]}),
term.subs({fs: self.bounds[fs][1]})
)
elif is_decreasing(term, intrvl, fs):
self.bounds[term] = (
term.subs({fs: self.bounds[fs][1]}),
term.subs({fs: self.bounds[fs][0]})
)
else:
return add
if all(term.is_number or term in self.bounds for term in add.args):
bounds = [(term, term) if term.is_number else self.bounds[term] for term in add.args]
largest_abs_guarantee = 0
for lo, hi in bounds:
if lo <= 0 <= hi:
continue
largest_abs_guarantee = max(largest_abs_guarantee,
min(abs(lo), abs(hi)))
new_terms = []
for term, (lo, hi) in zip(add.args, bounds):
if max(abs(lo), abs(hi)) >= largest_abs_guarantee*self.reltol:
new_terms.append(term)
return add.func(*new_terms)
else:
return add
|
SumApprox
|
python
|
crytic__slither
|
slither/vyper_parsing/ast/types.py
|
{
"start": 2178,
"end": 2258
}
|
class ____(ASTNode):
arg: str
annotation: Optional[ASTNode]
@dataclass
|
Arg
|
python
|
django-haystack__django-haystack
|
test_haystack/solr_tests/test_solr_backend.py
|
{
"start": 30069,
"end": 32503
}
|
class ____(TestCase):
fixtures = ["base_data.json"]
def setUp(self):
super().setUp()
# Wipe it clean.
clear_solr_index()
# Stow.
self.old_ui = connections["solr"].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = SolrMockSearchIndex()
self.ui.build(indexes=[self.smmi])
connections["solr"]._index = self.ui
self.sb = connections["solr"].get_backend()
self.sq = connections["solr"].get_query()
# Force indexing of the content.
self.smmi.update("solr")
def tearDown(self):
connections["solr"]._index = self.old_ui
super().tearDown()
def test_get_spelling(self):
self.sq.add_filter(SQ(content="Indexy"))
# Default collate + spelling path
self.assertEqual(self.sq.get_spelling_suggestion(), "(index)")
self.assertEqual(self.sq.get_spelling_suggestion("indexy"), "(index)")
# Just spelling path
self.sq.run(spelling_query="Indexy", collate=False)
self.assertEqual(self.sq._spelling_suggestion, "index")
def test_log_query(self):
reset_search_queries()
self.assertEqual(len(connections["solr"].queries), 0)
with self.settings(DEBUG=False):
len(self.sq.get_results())
self.assertEqual(len(connections["solr"].queries), 0)
with self.settings(DEBUG=True):
# Redefine it to clear out the cached results.
self.sq = connections["solr"].get_query()
self.sq.add_filter(SQ(name="bar"))
len(self.sq.get_results())
self.assertEqual(len(connections["solr"].queries), 1)
self.assertEqual(
connections["solr"].queries[0]["query_string"], "name:(bar)"
)
# And again, for good measure.
self.sq = connections["solr"].get_query()
self.sq.add_filter(SQ(name="bar"))
self.sq.add_filter(SQ(text="moof"))
len(self.sq.get_results())
self.assertEqual(len(connections["solr"].queries), 2)
self.assertEqual(
connections["solr"].queries[0]["query_string"], "name:(bar)"
)
self.assertEqual(
connections["solr"].queries[1]["query_string"],
"(name:(bar) AND text:(moof))",
)
@override_settings(DEBUG=True)
|
LiveSolrSearchQueryTestCase
|
python
|
neetcode-gh__leetcode
|
python/0013-roman-to-integer.py
|
{
"start": 0,
"end": 359
}
|
class ____:
def romanToInt(self, s: str) -> int:
roman = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
res = 0
for i in range(len(s)):
if i + 1 < len(s) and roman[s[i]] < roman[s[i + 1]]:
res -= roman[s[i]]
else:
res += roman[s[i]]
return res
|
Solution
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/multi_asset_sensor_definition.py
|
{
"start": 38457,
"end": 51107
}
|
class ____:
_advanced_record_ids_by_key: dict[AssetKey, set[int]]
_partition_key_by_record_id: dict[int, Optional[str]]
advance_all_cursors_called: bool
def __init__(self):
self._advanced_record_ids_by_key = defaultdict(set)
self._partition_key_by_record_id = {}
self.advance_all_cursors_called = False
def add_advanced_records(
self, materialization_records_by_key: Mapping[AssetKey, Optional["EventLogRecord"]]
):
for asset_key, materialization in materialization_records_by_key.items():
if materialization:
self._advanced_record_ids_by_key[asset_key].add(materialization.storage_id)
self._partition_key_by_record_id[materialization.storage_id] = (
materialization.partition_key
)
def get_cursor_with_advances(
self,
context: MultiAssetSensorEvaluationContext,
initial_cursor: MultiAssetSensorContextCursor,
) -> Optional[str]:
"""Given the multi asset sensor context and the cursor at the start of the tick,
returns the cursor that should be used in the next tick.
If the cursor has not been updated, returns None
"""
if len(self._advanced_record_ids_by_key) == 0:
# No events marked as advanced
return None
return json.dumps(
{
str(asset_key): self.get_asset_cursor_with_advances(
asset_key, context, initial_cursor
)
for asset_key in context.asset_keys
}
)
def get_asset_cursor_with_advances(
self,
asset_key: AssetKey,
context: MultiAssetSensorEvaluationContext,
initial_cursor: MultiAssetSensorContextCursor,
) -> MultiAssetSensorAssetCursorComponent:
from dagster._core.event_api import AssetRecordsFilter
advanced_records: set[int] = self._advanced_record_ids_by_key.get(asset_key, set())
if len(advanced_records) == 0:
# No events marked as advanced for this asset key
return initial_cursor.get_cursor_for_asset(asset_key)
initial_asset_cursor = initial_cursor.get_cursor_for_asset(asset_key)
latest_consumed_event_id_at_tick_start = initial_asset_cursor.latest_consumed_event_id
greatest_consumed_event_id_in_tick = max(advanced_records)
latest_consumed_partition_in_tick = self._partition_key_by_record_id[
greatest_consumed_event_id_in_tick
]
latest_unconsumed_record_by_partition: dict[str, int] = {}
if not self.advance_all_cursors_called:
latest_unconsumed_record_by_partition = (
initial_asset_cursor.trailing_unconsumed_partitioned_event_ids
)
if greatest_consumed_event_id_in_tick > (latest_consumed_event_id_at_tick_start or 0):
materialization_events = []
has_more = True
cursor = None
while has_more:
result = context.instance.fetch_materializations(
AssetRecordsFilter(
asset_key=asset_key,
after_storage_id=latest_consumed_event_id_at_tick_start,
before_storage_id=greatest_consumed_event_id_in_tick,
),
ascending=True,
limit=FETCH_MATERIALIZATION_BATCH_SIZE,
cursor=cursor,
)
cursor = result.cursor
has_more = result.has_more
materialization_events.extend(result.records)
unconsumed_events = list(context.get_trailing_unconsumed_events(asset_key)) + list(
materialization_events
)
else:
unconsumed_events = []
# Iterate through events in ascending order, storing the latest unconsumed
# event for each partition. If an advanced event exists for a partition, clear
# the prior unconsumed event for that partition.
for event in unconsumed_events:
partition = event.partition_key
if partition is not None: # Ignore unpartitioned events
if event.storage_id not in advanced_records:
latest_unconsumed_record_by_partition[partition] = event.storage_id
elif partition in latest_unconsumed_record_by_partition:
latest_unconsumed_record_by_partition.pop(partition)
if (
latest_consumed_partition_in_tick is not None
and latest_consumed_partition_in_tick in latest_unconsumed_record_by_partition
):
latest_unconsumed_record_by_partition.pop(latest_consumed_partition_in_tick)
if len(latest_unconsumed_record_by_partition.keys()) >= MAX_NUM_UNCONSUMED_EVENTS:
raise DagsterInvariantViolationError(
f"""
You have reached the maximum number of trailing unconsumed events
({MAX_NUM_UNCONSUMED_EVENTS}) for asset {asset_key} and no more events can be
added. You can access the unconsumed events by calling the
`get_trailing_unconsumed_events` method on the sensor context, and
mark events as consumed by passing them to `advance_cursor`.
Otherwise, you can clear all unconsumed events and reset the cursor to the latest
materialization for each asset by calling `advance_all_cursors`.
"""
)
return MultiAssetSensorAssetCursorComponent(
latest_consumed_event_partition=(
latest_consumed_partition_in_tick
if greatest_consumed_event_id_in_tick
> (latest_consumed_event_id_at_tick_start or 0)
else initial_asset_cursor.latest_consumed_event_partition
),
latest_consumed_event_id=(
greatest_consumed_event_id_in_tick
if greatest_consumed_event_id_in_tick
> (latest_consumed_event_id_at_tick_start or 0)
else latest_consumed_event_id_at_tick_start
),
trailing_unconsumed_partitioned_event_ids=latest_unconsumed_record_by_partition,
)
def get_cursor_from_latest_materializations(
asset_keys: Sequence[AssetKey], instance: DagsterInstance
) -> str:
cursor_dict: dict[str, MultiAssetSensorAssetCursorComponent] = {}
for asset_key in asset_keys:
materializations = instance.fetch_materializations(asset_key, limit=1).records
if materializations:
last_materialization = materializations[0]
cursor_dict[str(asset_key)] = MultiAssetSensorAssetCursorComponent(
last_materialization.partition_key,
last_materialization.storage_id,
{},
)
cursor_str = json.dumps(cursor_dict)
return cursor_str
def build_multi_asset_sensor_context(
*,
monitored_assets: Union[Sequence[AssetKey], AssetSelection],
repository_def: Optional["RepositoryDefinition"] = None,
instance: Optional[DagsterInstance] = None,
cursor: Optional[str] = None,
repository_name: Optional[str] = None,
cursor_from_latest_materializations: bool = False,
resources: Optional[Mapping[str, object]] = None,
definitions: Optional["Definitions"] = None,
last_sensor_start_time: Optional[float] = None,
) -> MultiAssetSensorEvaluationContext:
"""Builds multi asset sensor execution context for testing purposes using the provided parameters.
This function can be used to provide a context to the invocation of a multi asset sensor definition. If
provided, the dagster instance must be persistent; DagsterInstance.ephemeral() will result in an
error.
Args:
monitored_assets (Union[Sequence[AssetKey], AssetSelection]): The assets monitored
by the sensor. If an AssetSelection object is provided, it will only apply to assets
within the Definitions that this sensor is part of.
repository_def (RepositoryDefinition): `RepositoryDefinition` object that
the sensor is defined in. Must provide `definitions` if this is not provided.
instance (Optional[DagsterInstance]): The dagster instance configured to run the sensor.
cursor (Optional[str]): A string cursor to provide to the evaluation of the sensor. Must be
a dictionary of asset key strings to ints that has been converted to a json string
repository_name (Optional[str]): The name of the repository that the sensor belongs to.
cursor_from_latest_materializations (bool): If True, the cursor will be set to the latest
materialization for each monitored asset. By default, set to False.
resources (Optional[Mapping[str, object]]): The resource definitions
to provide to the sensor.
definitions (Optional[Definitions]): `Definitions` object that the sensor is defined in.
Must provide `repository_def` if this is not provided.
Examples:
.. code-block:: python
with instance_for_test() as instance:
context = build_multi_asset_sensor_context(
monitored_assets=[AssetKey("asset_1"), AssetKey("asset_2")],
instance=instance,
)
my_asset_sensor(context)
"""
from dagster._core.definitions import RepositoryDefinition
from dagster._core.definitions.definitions_class import Definitions
from dagster._core.execution.build_resources import wrap_resources_for_execution
check.opt_inst_param(instance, "instance", DagsterInstance)
check.opt_str_param(cursor, "cursor")
check.opt_str_param(repository_name, "repository_name")
repository_def = normalize_to_repository(
check.opt_inst_param(definitions, "definitions", Definitions),
check.opt_inst_param(repository_def, "repository_def", RepositoryDefinition),
)
check.bool_param(cursor_from_latest_materializations, "cursor_from_latest_materializations")
check.opt_float_param(last_sensor_start_time, "last_sensor_start_time")
if cursor_from_latest_materializations:
if cursor:
raise DagsterInvalidInvocationError(
"Cannot provide both cursor and cursor_from_latest_materializations objects."
" Dagster will override the provided cursor based on the"
" cursor_from_latest_materializations object."
)
if not instance:
raise DagsterInvalidInvocationError(
"Cannot provide cursor_from_latest_materializations object without a Dagster"
" instance."
)
asset_keys: Sequence[AssetKey]
if isinstance(monitored_assets, AssetSelection):
asset_keys = cast(
"list[AssetKey]",
list(monitored_assets.resolve(list(set(repository_def.asset_graph.assets_defs)))),
)
else:
asset_keys = monitored_assets
cursor = get_cursor_from_latest_materializations(asset_keys, instance)
return MultiAssetSensorEvaluationContext(
instance_ref=None,
last_completion_time=None,
last_run_key=None,
cursor=cursor,
repository_name=repository_name,
instance=instance,
monitored_assets=monitored_assets,
repository_def=repository_def,
resource_defs=wrap_resources_for_execution(resources),
last_sensor_start_time=last_sensor_start_time,
)
AssetMaterializationFunctionReturn = Union[
Iterator[Union[RunRequest, SkipReason, SensorResult]],
Sequence[RunRequest],
RunRequest,
SkipReason,
None,
SensorResult,
]
AssetMaterializationFunction = Callable[
...,
AssetMaterializationFunctionReturn,
]
MultiAssetMaterializationFunction = Callable[
...,
AssetMaterializationFunctionReturn,
]
@superseded(
additional_warn_text="For most use cases, Declarative Automation should be used instead of "
"multi_asset_sensors to monitor the status of upstream assets and launch runs in response. "
"In cases where side effects are required, or a specific job must be targeted for execution, "
"multi_asset_sensors may be used."
)
@public
|
MultiAssetSensorCursorAdvances
|
python
|
pypa__hatch
|
tests/backend/builders/test_config.py
|
{
"start": 26353,
"end": 35015
}
|
class ____:
def test_default(self, isolation):
builder = MockBuilder(str(isolation))
assert builder.config.sources == builder.config.sources == {}
assert builder.config.get_distribution_path(pjoin("src", "foo", "bar.py")) == pjoin("src", "foo", "bar.py")
def test_global_invalid_type(self, isolation):
config = {"tool": {"hatch": {"build": {"sources": ""}}}}
builder = MockBuilder(str(isolation), config=config)
with pytest.raises(TypeError, match="Field `tool.hatch.build.sources` must be a mapping or array of strings"):
_ = builder.config.sources
def test_global_array(self, isolation):
config = {"tool": {"hatch": {"build": {"sources": ["src"]}}}}
builder = MockBuilder(str(isolation), config=config)
assert len(builder.config.sources) == 1
assert builder.config.sources[pjoin("src", "")] == ""
assert builder.config.get_distribution_path(pjoin("src", "foo", "bar.py")) == pjoin("foo", "bar.py")
def test_global_array_source_not_string(self, isolation):
config = {"tool": {"hatch": {"build": {"sources": [0]}}}}
builder = MockBuilder(str(isolation), config=config)
with pytest.raises(TypeError, match="Source #1 in field `tool.hatch.build.sources` must be a string"):
_ = builder.config.sources
def test_global_array_source_empty_string(self, isolation):
config = {"tool": {"hatch": {"build": {"sources": [""]}}}}
builder = MockBuilder(str(isolation), config=config)
with pytest.raises(ValueError, match="Source #1 in field `tool.hatch.build.sources` cannot be an empty string"):
_ = builder.config.sources
def test_global_mapping(self, isolation):
config = {"tool": {"hatch": {"build": {"sources": {"src/foo": "renamed"}}}}}
builder = MockBuilder(str(isolation), config=config)
assert len(builder.config.sources) == 1
assert builder.config.sources[pjoin("src", "foo", "")] == pjoin("renamed", "")
assert builder.config.get_distribution_path(pjoin("src", "foo", "bar.py")) == pjoin("renamed", "bar.py")
def test_global_mapping_source_empty_string(self, isolation):
config = {"tool": {"hatch": {"build": {"sources": {"": "renamed"}}}}}
builder = MockBuilder(str(isolation), config=config)
assert len(builder.config.sources) == 1
assert builder.config.sources[""] == pjoin("renamed", "")
assert builder.config.get_distribution_path("bar.py") == pjoin("renamed", "bar.py")
assert builder.config.get_distribution_path(pjoin("foo", "bar.py")) == pjoin("renamed", "foo", "bar.py")
def test_global_mapping_path_empty_string(self, isolation):
config = {"tool": {"hatch": {"build": {"sources": {"src/foo": ""}}}}}
builder = MockBuilder(str(isolation), config=config)
assert len(builder.config.sources) == 1
assert builder.config.sources[pjoin("src", "foo", "")] == ""
assert builder.config.get_distribution_path(pjoin("src", "foo", "bar.py")) == "bar.py"
def test_global_mapping_replacement_not_string(self, isolation):
config = {"tool": {"hatch": {"build": {"sources": {"src/foo": 0}}}}}
builder = MockBuilder(str(isolation), config=config)
with pytest.raises(
TypeError, match="Path for source `src/foo` in field `tool.hatch.build.sources` must be a string"
):
_ = builder.config.sources
def test_target_invalid_type(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"foo": {"sources": ""}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(
TypeError, match="Field `tool.hatch.build.targets.foo.sources` must be a mapping or array of strings"
):
_ = builder.config.sources
def test_target_array(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"foo": {"sources": ["src"]}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert len(builder.config.sources) == 1
assert builder.config.sources[pjoin("src", "")] == ""
assert builder.config.get_distribution_path(pjoin("src", "foo", "bar.py")) == pjoin("foo", "bar.py")
def test_target_array_source_not_string(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"foo": {"sources": [0]}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(
TypeError, match="Source #1 in field `tool.hatch.build.targets.foo.sources` must be a string"
):
_ = builder.config.sources
def test_target_array_source_empty_string(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"foo": {"sources": [""]}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(
ValueError, match="Source #1 in field `tool.hatch.build.targets.foo.sources` cannot be an empty string"
):
_ = builder.config.sources
def test_target_mapping(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"foo": {"sources": {"src/foo": "renamed"}}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert len(builder.config.sources) == 1
assert builder.config.sources[pjoin("src", "foo", "")] == pjoin("renamed", "")
assert builder.config.get_distribution_path(pjoin("src", "foo", "bar.py")) == pjoin("renamed", "bar.py")
def test_target_mapping_source_empty_string(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"foo": {"sources": {"": "renamed"}}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert len(builder.config.sources) == 1
assert builder.config.sources[""] == pjoin("renamed", "")
assert builder.config.get_distribution_path(pjoin("bar.py")) == pjoin("renamed", "bar.py")
def test_target_mapping_path_empty_string(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"foo": {"sources": {"src/foo": ""}}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert len(builder.config.sources) == 1
assert builder.config.sources[pjoin("src", "foo", "")] == ""
assert builder.config.get_distribution_path(pjoin("src", "foo", "bar.py")) == "bar.py"
def test_target_mapping_replacement_not_string(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"foo": {"sources": {"src/foo": 0}}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(
TypeError,
match="Path for source `src/foo` in field `tool.hatch.build.targets.foo.sources` must be a string",
):
_ = builder.config.sources
def test_target_overrides_global(self, isolation):
config = {"tool": {"hatch": {"build": {"sources": ["src"], "targets": {"foo": {"sources": ["pkg"]}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert len(builder.config.sources) == 1
assert builder.config.sources[pjoin("pkg", "")] == ""
assert builder.config.get_distribution_path(pjoin("pkg", "foo", "bar.py")) == pjoin("foo", "bar.py")
assert builder.config.get_distribution_path(pjoin("src", "foo", "bar.py")) == pjoin("src", "foo", "bar.py")
def test_no_source(self, isolation):
config = {"tool": {"hatch": {"build": {"sources": ["bar"]}}}}
builder = MockBuilder(str(isolation), config=config)
assert len(builder.config.sources) == 1
assert builder.config.sources[pjoin("bar", "")] == ""
assert builder.config.get_distribution_path(pjoin("foo", "bar.py")) == pjoin("foo", "bar.py")
def test_compatible_with_packages(self, isolation):
config = {"tool": {"hatch": {"build": {"sources": {"src/foo": "renamed"}, "packages": ["src/foo"]}}}}
builder = MockBuilder(str(isolation), config=config)
assert len(builder.config.sources) == 1
assert builder.config.sources[pjoin("src", "foo", "")] == pjoin("renamed", "")
assert builder.config.get_distribution_path(pjoin("src", "foo", "bar.py")) == pjoin("renamed", "bar.py")
|
TestSources
|
python
|
pytorch__pytorch
|
torch/distributed/elastic/agent/server/api.py
|
{
"start": 12540,
"end": 14149
}
|
class ____:
"""Return results of the worker executions.
Run results follow an "all-or-nothing" policy where the run is successful if and
only if ALL local workers managed by this agent complete successfully.
If the result is successful (e.g. ``is_failed() = False``) then the ``return_values``
field contains the outputs (return values) of the workers managed by THIS agent mapped
by their GLOBAL ranks. That is ``result.return_values[0]`` is the return value of
global rank 0.
.. note:: ``return_values`` are only meaningful for when the worker entrypoint
is a function. Workers specified as a binary entrypoint do not canonically
have a return value and the ``return_values`` field is meaningless and
may be empty.
If ``is_failed()`` returns ``True`` then the ``failures`` field contains the
failure information, again, mapped by the GLOBAL rank of the worker that failed.
The keys in ``return_values`` and ``failures`` are mutually exclusive, that is,
a worker's final state can only be one of: succeeded, failed. Workers intentionally
terminated by the agent according to the agent's restart policy, are not represented
in either ``return_values`` nor ``failures``.
"""
state: WorkerState
return_values: dict[int, Any] = field(default_factory=dict)
failures: dict[int, ProcessFailure] = field(default_factory=dict)
def is_failed(self) -> bool:
return self.state == WorkerState.FAILED
def _get_fq_hostname() -> str:
return socket.getfqdn(socket.gethostname())
|
RunResult
|
python
|
scipy__scipy
|
scipy/fftpack/tests/test_real_transforms.py
|
{
"start": 7550,
"end": 7967
}
|
class ____(_TestDCTBase):
def test_definition_ortho(self):
# Test orthornomal mode.
dt = np.result_type(np.float32, self.rdt)
for xr in X:
x = np.array(xr, dtype=self.rdt)
y = dct(x, norm='ortho', type=2)
xi = dct(y, norm="ortho", type=3)
assert_equal(xi.dtype, dt)
assert_array_almost_equal(xi, x, decimal=self.dec)
|
_TestDCTIIIBase
|
python
|
walkccc__LeetCode
|
solutions/1749. Maximum Absolute Sum of Any Subarray/1749.py
|
{
"start": 0,
"end": 252
}
|
class ____:
def maxAbsoluteSum(self, nums):
ans = -math.inf
maxSum = 0
minSum = 0
for num in nums:
maxSum = max(num, maxSum + num)
minSum = min(num, minSum + num)
ans = max(ans, maxSum, -minSum)
return ans
|
Solution
|
python
|
Textualize__textual
|
tests/snapshot_tests/snapshot_apps/programmatic_disable_button.py
|
{
"start": 133,
"end": 900
}
|
class ____(App):
CSS = """
Screen {
align: center middle;
}
"""
BINDINGS = [("space", "toggle_button", "Toggle Button")]
def compose(self) -> ComposeResult:
with Center():
yield Label("Hover the button then hit space")
with Center():
yield Button("Enabled", id="disable-btn")
yield Footer()
def action_toggle_button(self) -> None:
self.app.bell()
button = self.query_one("#disable-btn", Button)
if button.disabled is False:
button.disabled = True
button.label = "Disabled"
else:
button.disabled = False
button.label = "Enabled"
if __name__ == "__main__":
app = ExampleApp()
app.run()
|
ExampleApp
|
python
|
astropy__astropy
|
astropy/cosmology/_src/utils.py
|
{
"start": 2344,
"end": 3920
}
|
class ____(Protocol):
shape: tuple[int, ...]
def aszarr(
z: Quantity | NDArray[Any] | ArrayLike | ScalarTypes | HasShape, /
) -> NDArray[Any]:
"""Redshift as an Array duck type.
Allows for any ndarray ducktype by checking for attribute "shape".
"""
# Scalars
if isinstance(z, SCALAR_TYPES):
return np.asarray(z)
# Quantities. We do this before checking for normal ndarray because Quantity is a
# subclass of ndarray.
elif isinstance(z, Quantity):
return z.to_value(cu.redshift)[...]
# Arrays
elif isinstance(z, np.ndarray):
return z
return Quantity(z, cu.redshift, copy=COPY_IF_NEEDED, subok=True).view(np.ndarray)
# ===================================================================
def deprecated_keywords(
*kws: str, since: str | tuple[str, ...]
) -> Callable[[Callable[P, R]], Callable[P, R]]:
"""Deprecate calling one or more arguments as keywords.
Parameters
----------
*kws: str
Names of the arguments that will become positional-only.
since : str, float, or tuple of str or float
The release at which the old argument became deprecated. Can be a single
version (e.g., "7.0" or 7.0) or a tuple of versions for multiple arguments.
"""
return functools.partial(_depr_kws, kws=kws, since=since)
def _depr_kws(
func: Callable[P, R], /, kws: tuple[str, ...], since: str | tuple[str, ...]
) -> Callable[P, R]:
wrapper = _depr_kws_wrap(func, kws, since)
functools.update_wrapper(wrapper, func)
return wrapper
|
HasShape
|
python
|
getsentry__sentry
|
src/sentry/shared_integrations/response/mapping.py
|
{
"start": 167,
"end": 452
}
|
class ____(dict, BaseApiResponse):
def __init__(self, data: Mapping[str, Any], *args: Any, **kwargs: Any) -> None:
dict.__init__(self, data)
BaseApiResponse.__init__(self, *args, **kwargs)
@property
def json(self) -> Any:
return self
|
MappingApiResponse
|
python
|
getsentry__sentry
|
tests/sentry/integrations/vsts/test_provider.py
|
{
"start": 6445,
"end": 10827
}
|
class ____(TestCase):
def setUp(self) -> None:
responses.reset()
account_id = "1234567-8910"
self.base_url = "http://sentry2.visualstudio.com/"
self.accounts: list[dict[str, Any]] = [
{
"accountId": "1234567-89",
"NamespaceId": "00000000-0000-0000-0000-000000000000",
"accountName": "sentry",
"OrganizationName": None,
"AccountType": 0,
"AccountOwner": "00000000-0000-0000-0000-000000000000",
"CreatedBy": "00000000-0000-0000-0000-000000000000",
"CreatedDate": "0001-01-01T00:00:00",
"AccountStatus": 0,
"StatusReason": None,
"LastUpdatedBy": "00000000-0000-0000-0000-000000000000",
"Properties": {},
},
{
"accountId": account_id,
"NamespaceId": "00000000-0000-0000-0000-000000000000",
"accountName": "sentry2",
"OrganizationName": None,
"AccountType": 0,
"AccountOwner": "00000000-0000-0000-0000-000000000000",
"CreatedBy": "00000000-0000-0000-0000-000000000000",
"CreatedDate": "0001-01-01T00:00:00",
"AccountStatus": 0,
"StatusReason": None,
"LastUpdatedBy": "00000000-0000-0000-0000-000000000000",
"Properties": {},
},
]
responses.add(
responses.GET,
"https://app.vssps.visualstudio.com/_apis/accounts",
json={"value": self.accounts, "count": len(self.accounts)},
status=200,
)
responses.add(
responses.GET,
"https://app.vssps.visualstudio.com/_apis/resourceareas/79134C72-4A58-4B42-976C-04E7115F32BF?hostId=%s&api-preview=5.0-preview.1"
% account_id,
json={"locationUrl": self.base_url},
)
@responses.activate
def test_dispatch(self) -> None:
view = AccountConfigView()
request = HttpRequest()
request.POST.update({"account": "1234567-8910"})
pipeline = Mock()
pipeline.state = {
"accounts": self.accounts,
"identity": {"data": {"access_token": "123456789"}},
}
pipeline.fetch_state = lambda key: pipeline.state[key]
pipeline.bind_state = lambda name, value: pipeline.state.update({name: value})
view.dispatch(request, pipeline)
assert pipeline.fetch_state(key="account") == self.accounts[1]
assert pipeline.next_step.call_count == 1
@responses.activate
def test_get_accounts(self) -> None:
view = AccountConfigView()
accounts = view.get_accounts("access-token", 123)
assert accounts is not None
assert accounts["value"][0]["accountName"] == "sentry"
assert accounts["value"][1]["accountName"] == "sentry2"
@responses.activate
def test_account_form(self) -> None:
account_form = AccountForm(self.accounts)
field = account_form.fields["account"]
assert isinstance(field, ChoiceField)
assert field.choices == [
("1234567-89", "sentry"),
("1234567-8910", "sentry2"),
]
@responses.activate
@patch("sentry.integrations.vsts.integration.get_user_info")
@patch("sentry.integrations.vsts.integration.render_to_response")
def test_no_accounts_received(
self, mock_render_to_response: MagicMock, mock_get_user_info: MagicMock
) -> None:
responses.reset()
responses.add(
responses.GET,
"https://app.vssps.visualstudio.com/_apis/accounts",
json={"value": [], "count": 0},
status=200,
)
view = AccountConfigView()
request = Mock()
request.POST = {}
request.user = self.user
pipeline = Mock()
pipeline.fetch_state = lambda key: {"data": {"access_token": "1234567890"}}
pipeline.organization = self.organization
view.dispatch(request, pipeline)
assert mock_get_user_info.called is True
assert mock_render_to_response.called is True
assert mock_render_to_response.call_args[1]["context"] == {"no_accounts": True}
@control_silo_test
|
TestAccountConfigView
|
python
|
huggingface__transformers
|
src/transformers/models/phimoe/configuration_phimoe.py
|
{
"start": 873,
"end": 10474
}
|
class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`PhimoeModel`]. It is used to instantiate a Phi-moe
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the
[microsoft/Phi-3.5-MoE-instruct](https://huggingface.co/microsoft/Phi-3.5-MoE-instruct).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32064):
Vocabulary size of the Phimoe model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`PhimoeModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 6400):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 8):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
The maximum sequence length that this model might ever be used with. Mixtral's sliding window attention
allows sequence of up to 4096*32 tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
The id of the padding token.
bos_token_id (`int`, *optional*, defaults to 1):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 2):
The id of the "end-of-sequence" token.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
sliding_window (`int`, *optional*):
Sliding window attention window size. If not specified, will default to `262144`.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
num_experts_per_tok (`int`, *optional*, defaults to 2):
The number of experts to root per-token, can be also interpreted as the `top-p` routing
parameter
num_local_experts (`int`, *optional*, defaults to 16):
Number of experts per Sparse MLP layer.
output_router_logits (`bool`, *optional*, defaults to `False`):
Whether or not the router logits should be returned by the model. Enabling this will also
allow the model to output the auxiliary loss. See [here]() for more details
router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
The aux loss factor for the total loss.
router_jitter_noise (`float`, *optional*, defaults to 0.01):
Amount of noise to add to the router.
input_jitter_noise (`float`, *optional*, defaults to 0.0): Input jitter noise
attention_bias (`bool`, *optional*, defaults to `False`): Attention bias
lm_head_bias (`bool`, *optional*, defaults to `False`): LM head bias
Example:
```python
>>> from transformers import PhimoeModel, PhimoeConfig
>>> # Initializing a Phi-3 style configuration
>>> configuration = PhimoeConfig.from_pretrained("microsoft/Phi-3.5-MoE-instruct")
>>> # Initializing a model from the configuration
>>> model = PhimoeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "phimoe"
keys_to_ignore_at_inference = ["past_key_values"]
default_theta = 1000000.0
def __init__(
self,
vocab_size: Optional[int] = 32064,
hidden_size: Optional[int] = 4096,
intermediate_size: Optional[int] = 6400,
num_hidden_layers: Optional[int] = 32,
num_attention_heads: Optional[int] = 32,
num_key_value_heads: Optional[int] = 8,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 4096 * 32,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-5,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = None,
bos_token_id: Optional[int] = 1,
eos_token_id: Optional[int] = 2,
tie_word_embeddings: Optional[int] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
sliding_window: Optional[int] = None,
attention_dropout: Optional[float] = 0.0,
num_experts_per_tok: Optional[int] = 2,
num_local_experts: Optional[int] = 16,
output_router_logits: Optional[bool] = False,
router_aux_loss_coef: Optional[float] = 0.001,
router_jitter_noise: Optional[float] = 0.01,
input_jitter_noise: Optional[float] = 0.0,
attention_bias: Optional[bool] = False,
lm_head_bias: Optional[bool] = False,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.sliding_window = sliding_window
self.attention_bias = attention_bias
self.lm_head_bias = lm_head_bias
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_dropout = attention_dropout
self.num_experts_per_tok = num_experts_per_tok
self.num_local_experts = num_local_experts
self.output_router_logits = output_router_logits
self.router_aux_loss_coef = router_aux_loss_coef
self.router_jitter_noise = router_jitter_noise
self.input_jitter_noise = input_jitter_noise
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
def validate_rope(self, ignore_keys=None):
"""
Validate the `rope_parameters` configuration.
"""
super().validate_rope(ignore_keys=ignore_keys)
# Run model-specific rope validation
if self.rope_parameters["rope_type"] != "default":
if "original_max_position_embeddings" in self.rope_parameters:
self.original_max_position_embeddings = self.rope_parameters["original_max_position_embeddings"]
rope_parameters_short_mscale = self.rope_parameters.get("short_mscale", None)
rope_parameters_long_mscale = self.rope_parameters.get("long_mscale", None)
if not isinstance(rope_parameters_short_mscale, (int, float)):
raise TypeError(
f"`rope_parameters`'s short_mscale field must be a number, got {rope_parameters_short_mscale}"
)
if not isinstance(rope_parameters_long_mscale, (int, float)):
raise TypeError(
f"`rope_parameters`'s long_mscale field must be a number, got {rope_parameters_long_mscale}"
)
__all__ = ["PhimoeConfig"]
|
PhimoeConfig
|
python
|
ray-project__ray
|
python/ray/data/_internal/execution/autoscaling_requester.py
|
{
"start": 780,
"end": 5152
}
|
class ____:
"""Actor to make resource requests to autoscaler for the datasets.
The resource requests are set to timeout after RESOURCE_REQUEST_TIMEOUT seconds.
For those live requests, we keep track of the last request made for each execution,
which overrides all previous requests it made; then sum the requested amounts
across all executions as the final request to the autoscaler.
"""
def __init__(self):
# execution_id -> (List[Dict], expiration timestamp)
self._resource_requests = {}
# TTL for requests.
self._timeout = RESOURCE_REQUEST_TIMEOUT
self._self_handle = ray.get_runtime_context().current_actor
# Start a thread to purge expired requests periodically.
def purge_thread_run():
while True:
time.sleep(PURGE_INTERVAL)
# Call purge_expired_requests() as an actor task,
# so we don't need to handle multi-threading.
ray.get(self._self_handle.purge_expired_requests.remote())
self._purge_thread = threading.Thread(target=purge_thread_run, daemon=True)
self._purge_thread.start()
def purge_expired_requests(self):
self._purge()
ray.autoscaler.sdk.request_resources(bundles=self._aggregate_requests())
def request_resources(self, req: List[Dict], execution_id: str):
# Purge expired requests before making request to autoscaler.
self._purge()
# For the same execution_id, we track the latest resource request and
# the its expiration timestamp.
self._resource_requests[execution_id] = (
req,
time.time() + self._timeout,
)
# We aggregate the resource requests across all execution_id's to Ray
# autoscaler.
ray.autoscaler.sdk.request_resources(bundles=self._aggregate_requests())
def _purge(self):
# Purge requests that are stale.
now = time.time()
for k, (_, t) in list(self._resource_requests.items()):
if t < now:
self._resource_requests.pop(k)
def _aggregate_requests(self) -> List[Dict]:
req = []
for _, (r, _) in self._resource_requests.items():
req.extend(r)
def get_cpus(req):
num_cpus = 0
for r in req:
if "CPU" in r:
num_cpus += r["CPU"]
return num_cpus
# Round up CPUs to exceed total cluster CPUs so it can actually upscale.
# This is to handle the issue where the autoscaling is driven by memory
# pressure (rather than CPUs) from streaming executor. In such case, simply
# asking for incremental CPUs (e.g. 1 CPU for each ready operator) may not
# actually be able to trigger autoscaling if existing CPUs in cluster can
# already satisfy the incremental CPUs request.
num_cpus = get_cpus(req)
if num_cpus > 0:
total = ray.cluster_resources()
if "CPU" in total and num_cpus <= total["CPU"]:
delta = (
math.ceil(ARTIFICIAL_CPU_SCALING_FACTOR * total["CPU"]) - num_cpus
)
req.extend([{"CPU": 1}] * delta)
return req
def _test_set_timeout(self, ttl):
"""Set the timeout. This is for test only"""
self._timeout = ttl
# Creating/getting an actor from multiple threads is not safe.
# https://github.com/ray-project/ray/issues/41324
_autoscaling_requester_lock: threading.RLock = threading.RLock()
def get_or_create_autoscaling_requester_actor():
ctx = DataContext.get_current()
scheduling_strategy = ctx.scheduling_strategy
# Pin the autoscaling requester actor to the local node so it fate-shares with the driver.
# Note: for Ray Client, the ray.get_runtime_context().get_node_id() should
# point to the head node.
scheduling_strategy = NodeAffinitySchedulingStrategy(
ray.get_runtime_context().get_node_id(),
soft=False,
)
with _autoscaling_requester_lock:
return AutoscalingRequester.options(
name="AutoscalingRequester",
namespace="AutoscalingRequester",
get_if_exists=True,
lifetime="detached",
scheduling_strategy=scheduling_strategy,
).remote()
|
AutoscalingRequester
|
python
|
xlwings__xlwings
|
xlwings/main.py
|
{
"start": 98185,
"end": 99104
}
|
class ____:
def __init__(self, impl):
"""
Represents a PageSetup object.
.. versionadded:: 0.24.2
"""
self.impl = impl
@property
def api(self):
"""
Returns the native object (``pywin32`` or ``appscript`` obj)
of the engine being used.
.. versionadded:: 0.24.2
"""
return self.impl.api
@property
def print_area(self):
"""
Gets or sets the range address that defines the print area.
Examples
--------
>>> mysheet.page_setup.print_area = '$A$1:$B$3'
>>> mysheet.page_setup.print_area
'$A$1:$B$3'
>>> mysheet.page_setup.print_area = None # clear the print_area
.. versionadded:: 0.24.2
"""
return self.impl.print_area
@print_area.setter
def print_area(self, value):
self.impl.print_area = value
|
PageSetup
|
python
|
keras-team__keras
|
keras/src/optimizers/adagrad_test.py
|
{
"start": 174,
"end": 3249
}
|
class ____(testing.TestCase):
def test_config(self):
optimizer = Adagrad(
learning_rate=0.5,
initial_accumulator_value=0.2,
epsilon=1e-5,
)
self.run_class_serialization_test(optimizer)
def test_single_step(self):
optimizer = Adagrad(learning_rate=0.5)
grads = ops.array([1.0, 6.0, 7.0, 2.0])
vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.assertAllClose(
vars, [0.5233, 1.5007, 2.5005, 3.5061], rtol=1e-4, atol=1e-4
)
def test_weight_decay(self):
grads, var1, var2, var3 = (
ops.zeros(()),
backend.Variable(2.0),
backend.Variable(2.0, name="exclude"),
backend.Variable(2.0),
)
optimizer_1 = Adagrad(learning_rate=1.0, weight_decay=0.004)
optimizer_1.apply_gradients(zip([grads], [var1]))
optimizer_2 = Adagrad(learning_rate=1.0, weight_decay=0.004)
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
optimizer_3 = Adagrad(learning_rate=1.0, weight_decay=0.004)
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.assertAlmostEqual(var1.numpy(), 1.9760959, decimal=6)
self.assertAlmostEqual(var2.numpy(), 2.0, decimal=6)
self.assertAlmostEqual(var3.numpy(), 2.0, decimal=6)
def test_correctness_with_golden(self):
optimizer = Adagrad(
learning_rate=0.2, initial_accumulator_value=0.3, epsilon=1e-6
)
x = backend.Variable(np.ones([10]))
grads = ops.arange(0.1, 1.1, 0.1)
first_grads = ops.full((10,), 0.01)
# fmt: off
golden = np.array(
[[0.9963, 0.9963, 0.9963, 0.9963, 0.9963, 0.9963, 0.9963, 0.9963, 0.9963, 0.9963],
[0.9604, 0.9278, 0.9003, 0.8784, 0.8615, 0.8487, 0.8388, 0.8313, 0.8255, 0.8209],
[0.9251, 0.8629, 0.8137, 0.7768, 0.7497, 0.7298, 0.7151, 0.704, 0.6956, 0.6891],
[0.8903, 0.8012, 0.7342, 0.6862, 0.6521, 0.6277, 0.6099, 0.5967, 0.5867, 0.579],
[0.856, 0.7422, 0.6604, 0.6037, 0.5644, 0.5367, 0.5168, 0.5021, 0.491, 0.4825]]
)
# fmt: on
optimizer.apply_gradients(zip([first_grads], [x]))
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
optimizer.apply_gradients(zip([grads], [x]))
def test_clip_norm(self):
optimizer = Adagrad(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
optimizer = Adagrad(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
|
AdagradTest
|
python
|
falconry__falcon
|
tests/test_wsgi_servers.py
|
{
"start": 5031,
"end": 8839
}
|
class ____:
def test_get(self, server_url, requests):
resp = requests.get(server_url + '/hello', timeout=_REQUEST_TIMEOUT)
assert resp.status_code == 200
assert resp.text == 'Hello, World!\n'
assert resp.headers.get('Content-Type') == 'text/plain; charset=utf-8'
assert resp.headers.get('X-Falcon') == 'peregrine'
def test_get_deprecated(self, server_url, requests):
resp = requests.get(server_url + '/deprecated', timeout=_REQUEST_TIMEOUT)
# Since it tries to use resp.add_link() we expect an unhandled error
assert resp.status_code == 500
def test_post_multipart_form(self, server_url, requests):
size = random.randint(8 * _SIZE_1_MB, 15 * _SIZE_1_MB)
data = os.urandom(size)
digest = hashlib.sha1(data).hexdigest()
files = {
'random': ('random.dat', data),
'message': ('hello.txt', b'Hello, World!\n'),
}
resp = requests.post(
server_url + '/forms', files=files, timeout=_REQUEST_TIMEOUT
)
assert resp.status_code == 200
assert resp.json() == {
'message': {
'filename': 'hello.txt',
'sha1': '60fde9c2310b0d4cad4dab8d126b04387efba289',
},
'random': {
'filename': 'random.dat',
'sha1': digest,
},
}
def test_static_file(self, server_url, requests):
resp = requests.get(
server_url + '/tests/test_wsgi_servers.py', timeout=_REQUEST_TIMEOUT
)
assert resp.status_code == 200
# TODO(vytas): In retrospect, it would be easier to maintain these
# static route tests by creating a separate file instead of relying
# on the content of this same __file__.
assert resp.text.startswith(
'import hashlib\n'
'import os\n'
'import random\n'
'import subprocess\n'
'import sys\n'
'import time\n'
)
assert resp.headers.get('Content-Disposition') == (
'attachment; filename="test_wsgi_servers.py"'
)
content_length = int(resp.headers['Content-Length'])
file_size = os.path.getsize(__file__)
assert len(resp.content) == content_length == file_size
@pytest.mark.parametrize(
'byte_range,expected_head',
[
('7-', b'hashlib'),
('2-6', b'port'),
('32-38', b'random'),
('-47', b'The content of this comment is part of a test.\n'),
],
)
def test_static_file_byte_range(
self, byte_range, expected_head, wsgi_server, server_url, requests
):
if wsgi_server == 'meinheld':
pytest.xfail(
"Meinheld's file_wrapper fails without a fileno(), see also: "
'https://github.com/mopemope/meinheld/issues/130'
)
resp = requests.get(
server_url + '/tests/test_wsgi_servers.py',
timeout=_REQUEST_TIMEOUT,
headers={'Range': 'bytes=' + byte_range},
)
assert resp.status_code == 206
assert resp.content.startswith(expected_head)
content_length = int(resp.headers['Content-Length'])
assert len(resp.content) == content_length
file_size = os.path.getsize(__file__)
content_range_size = int(resp.headers['Content-Range'].split('/')[-1])
assert file_size == content_range_size
# TODO(vytas): In retrospect, it would be easier to maintain these
# static route tests by creating a separate file instead of relying
# on the content of this same __file__.
# NOTE(vytas): The content of this comment is part of a test.
|
TestWSGIServer
|
python
|
Textualize__textual
|
tests/test_path.py
|
{
"start": 215,
"end": 281
}
|
class ____(App[None]):
CSS_PATH = "test.tcss"
|
RelativePathStrApp
|
python
|
streamlit__streamlit
|
lib/streamlit/runtime/caching/storage/cache_storage_protocol.py
|
{
"start": 2643,
"end": 2765
}
|
class ____(CacheStorageError):
"""Raised when the key is not found in the cache storage."""
|
CacheStorageKeyNotFoundError
|
python
|
pytorch__pytorch
|
torch/fx/graph.py
|
{
"start": 8257,
"end": 8742
}
|
class ____:
def __init__(self, graph: "Graph", direction: Literal["_prev", "_next"] = "_next"):
assert direction in ("_next", "_prev")
self.graph = graph
self.direction = direction
def __len__(self):
return self.graph._len
def __iter__(self):
return _NodeIter(self.graph._root, self.direction == "_prev")
def __reversed__(self):
return _node_list(self.graph, "_next" if self.direction == "_prev" else "_prev")
|
_node_list
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 81546,
"end": 82134
}
|
class ____(sgqlc.types.Enum):
"""The merge options available for pull requests to this repository.
Enumeration Choices:
* `MERGE`: The pull request is added to the base branch in a merge
commit.
* `REBASE`: Commits from the pull request are added onto the base
branch individually without a merge commit.
* `SQUASH`: The pull request's commits are squashed into a single
commit before they are merged to the base branch.
"""
__schema__ = github_schema
__choices__ = ("MERGE", "REBASE", "SQUASH")
|
RepoChangeMergeSettingAuditEntryMergeType
|
python
|
django__django
|
tests/constraints/models.py
|
{
"start": 4459,
"end": 4848
}
|
class ____(models.Model):
age = models.IntegerField()
class Meta:
abstract = True
required_db_features = {
"supports_table_check_constraints",
}
constraints = [
models.CheckConstraint(
condition=models.Q(age__gte=18),
name="%(app_label)s_%(class)s_adult",
),
]
|
AbstractModel
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 779049,
"end": 786450
}
|
class ____(MarkPropDefnumber, NumericMarkPropDef):
"""
FieldOrDatumDefWithConditionDatumDefnumber schema wrapper.
Parameters
----------
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
condition : dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`, Sequence[dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`]
One or more value definition(s) with `a parameter or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
datum : str, bool, dict, float, :class:`ExprRef`, :class:`DateTime`, :class:`RepeatRef`, :class:`PrimitiveValue`, None
A constant value in data domain.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`, Literal['quantitative', 'ordinal', 'temporal', 'nominal', 'geojson']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_schema = {"$ref": "#/definitions/FieldOrDatumDefWithCondition<DatumDef,number>"}
def __init__(
self,
bandPosition: Optional[float] = Undefined,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
datum: Optional[
Temporal | Parameter | SchemaBase | Map | PrimitiveValue_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
**kwds,
):
super().__init__(
bandPosition=bandPosition,
condition=condition,
datum=datum,
title=title,
type=type,
**kwds,
)
|
FieldOrDatumDefWithConditionDatumDefnumber
|
python
|
ansible__ansible
|
test/units/module_utils/facts/test_ansible_collector.py
|
{
"start": 10644,
"end": 11046
}
|
class ____(collector.BaseFactCollector):
name = 'requires_something'
def collect(self, module=None, collected_facts=None):
collected_facts = collected_facts or {}
fact_dict = {}
fact_dict['needed_fact'] = collected_facts['needed_fact']
fact_dict['compound_fact'] = "compound-%s" % collected_facts['needed_fact']
return fact_dict
|
RequiresOtherFactCollector
|
python
|
pytorch__pytorch
|
benchmarks/dynamo/common.py
|
{
"start": 55452,
"end": 57815
}
|
class ____:
def scale(self, loss):
return loss
def get_dynamo_stats():
# TODO: consider deepcopy'ing the entire counters struct and
# adding a helper to do subtraction on it
return collections.Counter(
{
"calls_captured": torch._dynamo.utils.counters["stats"]["calls_captured"],
"unique_graphs": torch._dynamo.utils.counters["stats"]["unique_graphs"],
"graph_breaks": sum(torch._dynamo.utils.counters["graph_break"].values()),
# NB: The plus removes zero counts
"unique_graph_breaks": len(+torch._dynamo.utils.counters["graph_break"]),
"autograd_captures": torch._dynamo.utils.counters["compiled_autograd"][
"captures"
],
"autograd_compiles": torch._dynamo.utils.counters["compiled_autograd"][
"compiles"
],
"cudagraph_skips": torch._dynamo.utils.counters["inductor"][
"cudagraph_skips"
],
}
)
@contextmanager
def maybe_init_distributed(should_init_distributed, rank, world_size, port="6789"):
try:
if should_init_distributed:
torch.cuda.set_device(rank)
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = port
torch.distributed.init_process_group(
"nccl", rank=rank, world_size=world_size
)
yield
finally:
if should_init_distributed:
torch.distributed.destroy_process_group()
@contextmanager
def maybe_snapshot_memory(should_snapshot_memory, suffix):
# Enables Memory Snapshot tool for memory deep dives:
# https://pytorch.org/blog/understanding-gpu-memory-1/
try:
if should_snapshot_memory:
torch.cuda.memory._record_memory_history(max_entries=100000)
yield
finally:
if should_snapshot_memory:
try:
torch.cuda.memory._dump_snapshot(
os.path.join(
torch._dynamo.config.base_dir,
f"{output_filename.rstrip('.csv')}_{suffix}.pickle",
)
)
except Exception:
log.exception("Failed to save memory snapshot")
torch.cuda.memory._record_memory_history(enabled=None)
|
DummyGradScaler
|
python
|
apache__airflow
|
airflow-core/tests/unit/utils/test_logging_mixin.py
|
{
"start": 1807,
"end": 3375
}
|
class ____:
def setup_method(self):
warnings.filterwarnings(action="always")
def test_set_context(self, child_logger, parent_child_handlers):
handler1, handler2 = parent_child_handlers
handler1.set_context = mock.MagicMock()
handler2.set_context = mock.MagicMock()
parent = logging.getLogger(__name__)
parent.propagate = False
parent.addHandler(handler1)
log = parent.getChild("child")
log.addHandler(handler2)
log.propagate = True
value = "test"
set_context(log, value)
handler1.set_context.assert_called_once_with(value)
handler2.set_context.assert_called_once_with(value)
def test_default_logger_name(self):
"""
Ensure that by default, object logger name is equals to its module and class path.
"""
class DummyClass(LoggingMixin):
pass
assert DummyClass().log.name == "unit.utils.test_logging_mixin.DummyClass"
def test_log_config_logger_name_correctly_prefix_logger_name(self):
"""
Ensure that when a class has `_log_config_logger_name`, it is used as prefix in the final logger
name.
"""
class ClassWithParentLogConfig(LoggingMixin):
_log_config_logger_name: str = "airflow.tasks"
assert (
ClassWithParentLogConfig().log.name
== "airflow.tasks.unit.utils.test_logging_mixin.ClassWithParentLogConfig"
)
def teardown_method(self):
warnings.resetwarnings()
|
TestLoggingMixin
|
python
|
pytorch__pytorch
|
test/dynamo/test_higher_order_ops.py
|
{
"start": 249059,
"end": 251382
}
|
class ____(torch.nn.Module):
def forward(self, L_x_: "f32[3, 3, 3]", L_y_: "f32[3, 3, 3]"):
l_x_ = L_x_
l_y_ = L_y_
lazy_load_decompositions = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions = None
_vmap_increment_nesting = torch._functorch.predispatch._vmap_increment_nesting(3, 'error'); _vmap_increment_nesting = None
child: "f32[3, 3]" = torch._functorch.predispatch._add_batch_dim(l_x_, 0, 1); l_x_ = None
child_1: "f32[3, 3]" = torch._functorch.predispatch._add_batch_dim(l_y_, 0, 1); l_y_ = None
lazy_load_decompositions_1 = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions_1 = None
_vmap_increment_nesting_1 = torch._functorch.predispatch._vmap_increment_nesting(3, 'error'); _vmap_increment_nesting_1 = None
_add_batch_dim_2: "f32[3]" = torch._functorch.predispatch._add_batch_dim(child, 1, 2); child = None
_add_batch_dim_3: "f32[3]" = torch._functorch.predispatch._add_batch_dim(child_1, 1, 2); child_1 = None
batched_outputs: "f32[3]" = _add_batch_dim_2 + _add_batch_dim_3; _add_batch_dim_2 = _add_batch_dim_3 = None
batched_outputs_1: "f32[3, 3]" = torch._functorch.predispatch._remove_batch_dim(batched_outputs, 2, 3, 0); batched_outputs = None
_vmap_decrement_nesting = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting = None
_remove_batch_dim_1: "f32[3, 3, 3]" = torch._functorch.predispatch._remove_batch_dim(batched_outputs_1, 1, 3, 0); batched_outputs_1 = None
_vmap_decrement_nesting_1 = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting_1 = None
return (_remove_batch_dim_1,)
""",
)
def test_vmap_over_vmap_captured(self):
x = torch.ones(2, 3)
y = torch.ones(5, 3)
def fn(x):
return torch.func.vmap(torch.func.vmap(lambda y: x * y))(y)
wrapped_gm = self._compile_check(fn, (x,))
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
self.assertExpectedInline(
actual,
"""\
|
GraphModule
|
python
|
realpython__materials
|
django-todo-list/source_code_final/todo_app/apps.py
|
{
"start": 36,
"end": 147
}
|
class ____(AppConfig):
default_auto_field = "django.db.models.BigAutoField"
name = "todo_app"
|
TodoAppConfig
|
python
|
pandas-dev__pandas
|
pandas/tests/indexes/string/test_indexing.py
|
{
"start": 3668,
"end": 5268
}
|
class ____:
@pytest.mark.parametrize("null", [None, np.nan, float("nan"), pd.NA])
def test_get_indexer_non_unique_nas(
self, any_string_dtype, null, using_infer_string
):
index = Index(["a", "b", null], dtype=any_string_dtype)
indexer, missing = index.get_indexer_non_unique(["a", null])
if using_infer_string:
expected_indexer = np.array([0, 2], dtype=np.intp)
expected_missing = np.array([], dtype=np.intp)
elif any_string_dtype == "string" and not _equivalent_na(
any_string_dtype, null
):
expected_indexer = np.array([0, -1], dtype=np.intp)
expected_missing = np.array([1], dtype=np.intp)
else:
expected_indexer = np.array([0, 2], dtype=np.intp)
expected_missing = np.array([], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected_indexer)
tm.assert_numpy_array_equal(missing, expected_missing)
# actually non-unique
index = Index(["a", null, "b", null], dtype=any_string_dtype)
indexer, missing = index.get_indexer_non_unique(["a", null])
if using_infer_string:
expected_indexer = np.array([0, 1, 3], dtype=np.intp)
elif any_string_dtype == "string" and not _equivalent_na(
any_string_dtype, null
):
pass
else:
expected_indexer = np.array([0, 1, 3], dtype=np.intp)
tm.assert_numpy_array_equal(indexer, expected_indexer)
tm.assert_numpy_array_equal(missing, expected_missing)
|
TestGetIndexerNonUnique
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/links/dataproc.py
|
{
"start": 6605,
"end": 6829
}
|
class ____(BaseGoogleLink):
"""Helper class for constructing Dataproc Batches List Link."""
name = "Dataproc Batches List"
key = "dataproc_batches_list"
format_str = DATAPROC_BATCHES_LINK
|
DataprocBatchesListLink
|
python
|
doocs__leetcode
|
solution/1300-1399/1396.Design Underground System/Solution.py
|
{
"start": 0,
"end": 794
}
|
class ____:
def __init__(self):
self.ts = {}
self.d = {}
def checkIn(self, id: int, stationName: str, t: int) -> None:
self.ts[id] = (t, stationName)
def checkOut(self, id: int, stationName: str, t: int) -> None:
t0, station = self.ts[id]
x = self.d.get((station, stationName), (0, 0))
self.d[(station, stationName)] = (x[0] + t - t0, x[1] + 1)
def getAverageTime(self, startStation: str, endStation: str) -> float:
x = self.d[(startStation, endStation)]
return x[0] / x[1]
# Your UndergroundSystem object will be instantiated and called as such:
# obj = UndergroundSystem()
# obj.checkIn(id,stationName,t)
# obj.checkOut(id,stationName,t)
# param_3 = obj.getAverageTime(startStation,endStation)
|
UndergroundSystem
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/widgets/toolbars.py
|
{
"start": 1478,
"end": 1907
}
|
class ____(Window):
def __init__(self, text: AnyFormattedText, style: str = "", **kw: Any) -> None:
# Note: The style needs to be applied to the toolbar as a whole, not
# just the `FormattedTextControl`.
super().__init__(
FormattedTextControl(text, **kw),
style=style,
dont_extend_height=True,
height=Dimension(min=1),
)
|
FormattedTextToolbar
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-profit-from-trading-stocks.py
|
{
"start": 624,
"end": 1123
}
|
class ____(object):
def maximumProfit(self, present, future, budget):
"""
:type present: List[int]
:type future: List[int]
:type budget: int
:rtype: int
"""
dp = [[0]*(budget+1) for _ in xrange(2)]
for i, (p, f) in enumerate(itertools.izip(present, future)):
for b in xrange(budget+1):
dp[(i+1)%2][b] = max(dp[i%2][b], (dp[i%2][b-p]+(f-p) if b-p >= 0 else 0))
return dp[len(present)%2][-1]
|
Solution2
|
python
|
numpy__numpy
|
numpy/_core/tests/test_numeric.py
|
{
"start": 137991,
"end": 139310
}
|
class ____:
def test_object(self):
d = [1.] * 100
k = [1.] * 3
assert_array_almost_equal(np.convolve(d, k)[2:-2], np.full(98, 3))
def test_no_overwrite(self):
d = np.ones(100)
k = np.ones(3)
np.convolve(d, k)
assert_array_equal(d, np.ones(100))
assert_array_equal(k, np.ones(3))
def test_mode(self):
d = np.ones(100)
k = np.ones(3)
default_mode = np.convolve(d, k, mode='full')
with assert_raises(ValueError):
np.convolve(d, k, mode='f')
# integer mode
with assert_raises(ValueError):
np.convolve(d, k, mode=-1)
assert_array_equal(np.convolve(d, k, mode=2), default_mode)
# illegal arguments
with assert_raises(TypeError):
np.convolve(d, k, mode=None)
def test_convolve_empty_input_error_message(self):
"""
Test that convolve raises the correct error message when inputs are empty.
Regression test for gh-30272 (variable swapping bug).
"""
with pytest.raises(ValueError, match="a cannot be empty"):
np.convolve(np.array([]), np.array([1, 2]))
with pytest.raises(ValueError, match="v cannot be empty"):
np.convolve(np.array([1, 2]), np.array([]))
|
TestConvolve
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_points01.py
|
{
"start": 315,
"end": 1128
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_points01.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with point formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "pie"})
data = [2, 5, 4, 1, 7, 4]
worksheet.write_column("A1", data)
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$6",
"points": [{"fill": {"color": "red"}}],
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
run-llama__llama_index
|
llama-index-integrations/retrievers/llama-index-retrievers-kendra/llama_index/retrievers/kendra/base.py
|
{
"start": 361,
"end": 5934
}
|
class ____(BaseRetriever):
"""
AWS Kendra retriever for LlamaIndex.
See https://aws.amazon.com/kendra/ for more info.
Args:
index_id: Kendra Index ID.
query_config: Configuration for querying Kendra.
profile_name: The name of the profile in the ~/.aws/credentials
or ~/.aws/config files, which has either access keys or role information
specified. If not specified, the default credential profile or, if on an
EC2 instance, credentials from IMDS will be used.
region_name: The aws region e.g., `us-west-2`.
Fallback to AWS_DEFAULT_REGION env variable or region specified in
~/.aws/config.
aws_access_key_id: The aws access key id.
aws_secret_access_key: The aws secret access key.
aws_session_token: AWS temporary session token.
Example:
.. code-block:: python
from llama_index.retrievers.kendra import AmazonKendraRetriever
retriever = AmazonKendraRetriever(
index_id="<kendra-index-id>",
query_config={
"PageSize": 4,
"AttributeFilter": {
"EqualsTo": {
"Key": "tag",
"Value": {"StringValue": "space"}
}
}
},
)
"""
# Mapping of Kendra confidence levels to float scores
CONFIDENCE_SCORES = {
"VERY_HIGH": 1.0,
"HIGH": 0.8,
"MEDIUM": 0.6,
"LOW": 0.4,
"NOT_AVAILABLE": 0.0,
}
def __init__(
self,
index_id: str,
query_config: Optional[Dict[str, Any]] = None,
profile_name: Optional[str] = None,
region_name: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
):
self._client = get_aws_service_client(
service_name="kendra",
profile_name=profile_name,
region_name=region_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
)
# Create async session with the same credentials
self._async_session = aioboto3.Session(
profile_name=profile_name,
region_name=region_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
)
self.index_id = index_id
self.query_config = query_config or {}
super().__init__(callback_manager)
def _parse_response(self, response: Dict[str, Any]) -> List[NodeWithScore]:
"""Parse Kendra response into NodeWithScore objects."""
node_with_score = []
result_items = response.get("ResultItems", [])
for result in result_items:
text = ""
metadata = {}
# Extract text based on result type
if result.get("Type") == "ANSWER":
text = (
result.get("AdditionalAttributes", [{}])[0]
.get("Value", {})
.get("TextWithHighlightsValue", {})
.get("Text", "")
)
elif result.get("Type") == "DOCUMENT":
text = result.get("DocumentExcerpt", {}).get("Text", "")
# Extract metadata
if "DocumentId" in result:
metadata["document_id"] = result["DocumentId"]
if "DocumentTitle" in result:
metadata["title"] = result.get("DocumentTitle", {}).get("Text", "")
if "DocumentURI" in result:
metadata["source"] = result["DocumentURI"]
# Only create nodes for results with actual content
if text:
# Convert Kendra's confidence score to float
confidence = result.get("ScoreAttributes", {}).get(
"ScoreConfidence", "NOT_AVAILABLE"
)
score = self.CONFIDENCE_SCORES.get(confidence, 0.0)
node_with_score.append(
NodeWithScore(
node=TextNode(
text=text,
metadata=metadata,
),
score=score,
)
)
return node_with_score
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Synchronous retrieve method."""
query = query_bundle.query_str
query_params = {
"IndexId": self.index_id,
"QueryText": query.strip(),
**self.query_config,
}
response = self._client.query(**query_params)
return self._parse_response(response)
async def _aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Asynchronous retrieve method."""
query = query_bundle.query_str
query_params = {
"IndexId": self.index_id,
"QueryText": query.strip(),
**self.query_config,
}
async with self._async_session.client("kendra") as client:
response = await client.query(**query_params)
return self._parse_response(response)
|
AmazonKendraRetriever
|
python
|
sqlalchemy__sqlalchemy
|
test/perf/compiled_extensions/misc.py
|
{
"start": 5059,
"end": 6663
}
|
class ____(Case):
NUMBER = 5_000_000
@staticmethod
def python():
from sqlalchemy.sql import _util_cy
py_util = load_uncompiled_module(_util_cy)
assert not py_util._is_compiled()
return py_util.anon_map
@staticmethod
def cython():
from sqlalchemy.sql import _util_cy
assert _util_cy._is_compiled()
return _util_cy.anon_map
IMPLEMENTATIONS = {"python": python.__func__, "cython": cython.__func__}
def init_objects(self):
self.object_1 = column("x")
self.object_2 = bindparam("y")
self.impl_w_non_present = self.impl()
self.impl_w_present = iwp = self.impl()
iwp.get_anon(self.object_1)
iwp.get_anon(self.object_2)
@classmethod
def update_results(cls, results):
cls._divide_results(results, "cython", "python", "cy / py")
@test_case
def test_make(self):
self.impl()
@test_case
def test_get_anon_np(self):
self.impl_w_non_present.get_anon(self.object_1)
self.impl_w_non_present.clear()
@test_case
def test_get_anon_p(self):
self.impl_w_present.get_anon(self.object_1)
@test_case
def test_get_item_np(self):
self.impl_w_non_present[self.object_1]
self.impl_w_non_present.clear()
@test_case
def test_get_item_p(self):
self.impl_w_present[self.object_1]
@test_case
def test_has_key_np(self):
id(self.object_1) in self.impl_w_non_present
@test_case
def test_has_key_p(self):
id(self.object_1) in self.impl_w_present
|
AnonMap
|
python
|
mwaskom__seaborn
|
seaborn/_core/properties.py
|
{
"start": 11523,
"end": 11659
}
|
class ____(IntervalProperty):
"""Offset for edge-aligned text, in point units."""
_default_range = 0, 5
_legend = False
|
Offset
|
python
|
tiangolo__fastapi
|
fastapi/exceptions.py
|
{
"start": 336,
"end": 1877
}
|
class ____(StarletteHTTPException):
"""
An HTTP exception you can raise in your own code to show errors to the client.
This is for client errors, invalid authentication, invalid data, etc. Not for server
errors in your code.
Read more about it in the
[FastAPI docs for Handling Errors](https://fastapi.tiangolo.com/tutorial/handling-errors/).
## Example
```python
from fastapi import FastAPI, HTTPException
app = FastAPI()
items = {"foo": "The Foo Wrestlers"}
@app.get("/items/{item_id}")
async def read_item(item_id: str):
if item_id not in items:
raise HTTPException(status_code=404, detail="Item not found")
return {"item": items[item_id]}
```
"""
def __init__(
self,
status_code: Annotated[
int,
Doc(
"""
HTTP status code to send to the client.
"""
),
],
detail: Annotated[
Any,
Doc(
"""
Any data to be sent to the client in the `detail` key of the JSON
response.
"""
),
] = None,
headers: Annotated[
Optional[Dict[str, str]],
Doc(
"""
Any headers to send to the client in the response.
"""
),
] = None,
) -> None:
super().__init__(status_code=status_code, detail=detail, headers=headers)
|
HTTPException
|
python
|
pypa__pip
|
src/pip/_internal/metadata/importlib/_compat.py
|
{
"start": 455,
"end": 2804
}
|
class ____(Protocol):
"""A protocol that various path objects conform.
This exists because importlib.metadata uses both ``pathlib.Path`` and
``zipfile.Path``, and we need a common base for type hints (Union does not
work well since ``zipfile.Path`` is too new for our linter setup).
This does not mean to be exhaustive, but only contains things that present
in both classes *that we need*.
"""
@property
def name(self) -> str:
raise NotImplementedError()
@property
def parent(self) -> BasePath:
raise NotImplementedError()
def get_info_location(d: importlib.metadata.Distribution) -> BasePath | None:
"""Find the path to the distribution's metadata directory.
HACK: This relies on importlib.metadata's private ``_path`` attribute. Not
all distributions exist on disk, so importlib.metadata is correct to not
expose the attribute as public. But pip's code base is old and not as clean,
so we do this to avoid having to rewrite too many things. Hopefully we can
eliminate this some day.
"""
return getattr(d, "_path", None)
def parse_name_and_version_from_info_directory(
dist: importlib.metadata.Distribution,
) -> tuple[str | None, str | None]:
"""Get a name and version from the metadata directory name.
This is much faster than reading distribution metadata.
"""
info_location = get_info_location(dist)
if info_location is None:
return None, None
stem, suffix = os.path.splitext(info_location.name)
if suffix == ".dist-info":
name, sep, version = stem.partition("-")
if sep:
return name, version
if suffix == ".egg-info":
name = stem.split("-", 1)[0]
return name, None
return None, None
def get_dist_canonical_name(dist: importlib.metadata.Distribution) -> NormalizedName:
"""Get the distribution's normalized name.
The ``name`` attribute is only available in Python 3.10 or later. We are
targeting exactly that, but Mypy does not know this.
"""
if name := parse_name_and_version_from_info_directory(dist)[0]:
return canonicalize_name(name)
name = cast(Any, dist).name
if not isinstance(name, str):
raise BadMetadata(dist, reason="invalid metadata entry 'name'")
return canonicalize_name(name)
|
BasePath
|
python
|
jazzband__django-simple-history
|
simple_history/models.py
|
{
"start": 47650,
"end": 47762
}
|
class ____:
field: str
old: ModelChangeValue
new: ModelChangeValue
@dataclass(frozen=True)
|
ModelChange
|
python
|
pytorch__pytorch
|
torch/fx/passes/splitter_base.py
|
{
"start": 2323,
"end": 4694
}
|
class ____:
def __init__(
self,
min_acc_module_size=DEFAULT_MIN_ACC_MODULE_SIZE,
skip_fusion=DEFAULT_SKIP_FUSION,
allow_non_tensor=DEFAULT_ALLOW_NON_TENSOR,
max_acc_splits: int = -1,
):
parser = argparse.ArgumentParser()
parser.add_argument(
"--min-acc-module-size",
"--min_acc_module_size",
required=False,
type=int,
help="Minimum size limit of an accelerator subgraph.",
)
parser.add_argument(
"--max-acc-splits",
"--max_acc_splits",
required=False,
type=int,
help="Enforce a maximum number of split subgraphs.",
)
parser.add_argument(
"--skip-fusion",
"--skip_fusion",
default=False,
action="store_true",
help="If true then no fusion groups. Fusion group is used to "
"enforce no non-tensor data flow between submodules. If we don't "
"have this constrain, setting this to false is recommended as it "
"can reduce overhead.",
)
parser.add_argument(
"--allow-non-tensor",
"--allow_non_tensor",
default=False,
action="store_true",
help="For some backends non-tensor data flow between cpu and them "
"are not allowed. Therefore, if a node supported by accelerator but "
"it has non-tensor inputs or outputs to a cpu node we would want to "
"consider it as a cpu node during splitting. However, for some backends "
"we might not care about non-tensor data flow and we can set this option "
"to true to disable the functionality that prevent non-tensor data flow.",
)
args, _unknown = parser.parse_known_args()
self.min_acc_module_size: int = (
args.min_acc_module_size
if args.min_acc_module_size
else min_acc_module_size
)
self.skip_fusion: bool = args.skip_fusion if args.skip_fusion else skip_fusion
self.allow_non_tensor: bool = (
args.allow_non_tensor if args.allow_non_tensor else allow_non_tensor
)
self.max_acc_splits: int = max_acc_splits
@compatibility(is_backward_compatible=False)
|
_SplitterSettingBase
|
python
|
pandas-dev__pandas
|
pandas/tests/series/methods/test_searchsorted.py
|
{
"start": 190,
"end": 2493
}
|
class ____:
def test_searchsorted(self):
ser = Series([1, 2, 3])
result = ser.searchsorted(1, side="left")
assert is_scalar(result)
assert result == 0
result = ser.searchsorted(1, side="right")
assert is_scalar(result)
assert result == 1
def test_searchsorted_numeric_dtypes_scalar(self):
ser = Series([1, 2, 90, 1000, 3e9])
res = ser.searchsorted(30)
assert is_scalar(res)
assert res == 2
res = ser.searchsorted([30])
exp = np.array([2], dtype=np.intp)
tm.assert_numpy_array_equal(res, exp)
def test_searchsorted_numeric_dtypes_vector(self):
ser = Series([1, 2, 90, 1000, 3e9])
res = ser.searchsorted([91, 2e6])
exp = np.array([3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(res, exp)
def test_searchsorted_datetime64_scalar(self):
ser = Series(date_range("20120101", periods=10, freq="2D"))
val = Timestamp("20120102")
res = ser.searchsorted(val)
assert is_scalar(res)
assert res == 1
def test_searchsorted_datetime64_scalar_mixed_timezones(self):
# GH 30086
ser = Series(date_range("20120101", periods=10, freq="2D", tz="UTC"))
val = Timestamp("20120102", tz="America/New_York")
res = ser.searchsorted(val)
assert is_scalar(res)
assert res == 1
def test_searchsorted_datetime64_list(self):
ser = Series(date_range("20120101", periods=10, freq="2D"))
vals = [Timestamp("20120102"), Timestamp("20120104")]
res = ser.searchsorted(vals)
exp = np.array([1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(res, exp)
def test_searchsorted_sorter(self):
# GH8490
ser = Series([3, 1, 2])
res = ser.searchsorted([0, 3], sorter=np.argsort(ser))
exp = np.array([0, 2], dtype=np.intp)
tm.assert_numpy_array_equal(res, exp)
def test_searchsorted_dataframe_fail(self):
# GH#49620
ser = Series([1, 2, 3, 4, 5])
vals = pd.DataFrame([[1, 2], [3, 4]])
msg = "Value must be 1-D array-like or scalar, DataFrame is not supported"
with pytest.raises(ValueError, match=msg):
ser.searchsorted(vals)
|
TestSeriesSearchSorted
|
python
|
openai__gym
|
gym/wrappers/frame_stack.py
|
{
"start": 195,
"end": 2946
}
|
class ____:
"""Ensures common frames are only stored once to optimize memory use.
To further reduce the memory use, it is optionally to turn on lz4 to compress the observations.
Note:
This object should only be converted to numpy array just before forward pass.
"""
__slots__ = ("frame_shape", "dtype", "shape", "lz4_compress", "_frames")
def __init__(self, frames: list, lz4_compress: bool = False):
"""Lazyframe for a set of frames and if to apply lz4.
Args:
frames (list): The frames to convert to lazy frames
lz4_compress (bool): Use lz4 to compress the frames internally
Raises:
DependencyNotInstalled: lz4 is not installed
"""
self.frame_shape = tuple(frames[0].shape)
self.shape = (len(frames),) + self.frame_shape
self.dtype = frames[0].dtype
if lz4_compress:
try:
from lz4.block import compress
except ImportError:
raise DependencyNotInstalled(
"lz4 is not installed, run `pip install gym[other]`"
)
frames = [compress(frame) for frame in frames]
self._frames = frames
self.lz4_compress = lz4_compress
def __array__(self, dtype=None):
"""Gets a numpy array of stacked frames with specific dtype.
Args:
dtype: The dtype of the stacked frames
Returns:
The array of stacked frames with dtype
"""
arr = self[:]
if dtype is not None:
return arr.astype(dtype)
return arr
def __len__(self):
"""Returns the number of frame stacks.
Returns:
The number of frame stacks
"""
return self.shape[0]
def __getitem__(self, int_or_slice: Union[int, slice]):
"""Gets the stacked frames for a particular index or slice.
Args:
int_or_slice: Index or slice to get items for
Returns:
np.stacked frames for the int or slice
"""
if isinstance(int_or_slice, int):
return self._check_decompress(self._frames[int_or_slice]) # single frame
return np.stack(
[self._check_decompress(f) for f in self._frames[int_or_slice]], axis=0
)
def __eq__(self, other):
"""Checks that the current frames are equal to the other object."""
return self.__array__() == other
def _check_decompress(self, frame):
if self.lz4_compress:
from lz4.block import decompress
return np.frombuffer(decompress(frame), dtype=self.dtype).reshape(
self.frame_shape
)
return frame
|
LazyFrames
|
python
|
getsentry__sentry
|
tests/sentry/notifications/api/endpoints/test_notification_actions_index.py
|
{
"start": 1038,
"end": 1768
}
|
class ____(TypedDict):
query: _Query
result: set[NotificationAction]
def _mock_register(
data: MutableMapping[str, Any],
) -> Callable[[type[ActionRegistrationT]], type[ActionRegistrationT]]:
trigger_type = ActionTrigger.get_value(data["triggerType"])
service_type = ActionService.get_value(data["serviceType"])
target_type = ActionTarget.get_value(data["targetType"])
assert trigger_type is not None, "triggerType must exist"
assert service_type is not None, "serviceType must exist"
assert target_type is not None, "targetType must exist"
return NotificationAction.register_action(
trigger_type=trigger_type, service_type=service_type, target_type=target_type
)
|
_QueryResult
|
python
|
kamyu104__LeetCode-Solutions
|
Python/sum-of-k-subarrays-with-length-at-least-m.py
|
{
"start": 50,
"end": 815
}
|
class ____(object):
def maxSum(self, nums, k, m):
"""
:type nums: List[int]
:type k: int
:type m: int
:rtype: int
"""
prefix = [0]*(len(nums)+1)
for i in xrange(len(nums)):
prefix[i+1] = prefix[i]+nums[i]
dp = [float("-inf")]*(len(nums)+1)
dp[0] = 0
for i in xrange(1, k+1):
new_dp = [float("-inf")]*(len(nums)+1)
mx = float("-inf")
for j in xrange(i*m-1, len(nums)):
mx = max(mx, dp[(j+1)-m])
new_dp[j+1] = (prefix[j+1]-prefix[(j+1)-m])+mx
if j+1 != i*m:
new_dp[j+1] = max(new_dp[j+1], new_dp[j]+nums[j])
dp = new_dp
return max(dp)
|
Solution
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/links/vertex_ai.py
|
{
"start": 5240,
"end": 5497
}
|
class ____(BaseGoogleLink):
"""Helper class for constructing Vertex AI BatchPredictionJob link."""
name = "Batch Prediction Job"
key = "batch_prediction_job_conf"
format_str = VERTEX_AI_BATCH_PREDICTION_JOB_LINK
|
VertexAIBatchPredictionJobLink
|
python
|
coleifer__peewee
|
tests/models.py
|
{
"start": 177487,
"end": 181957
}
|
class ____(ModelTestCase):
requires = [C_Product, C_Archive, C_Part]
def setUp(self):
super(TestDataModifyingCTEIntegration, self).setUp()
for i in range(5):
C_Product.create(name='p%s' % i, price=i)
mp1_c_g = C_Part.create(part='mp1-c-g')
mp1_c = C_Part.create(part='mp1-c', sub_part=mp1_c_g)
mp1 = C_Part.create(part='mp1', sub_part=mp1_c)
mp2_c_g = C_Part.create(part='mp2-c-g')
mp2_c = C_Part.create(part='mp2-c', sub_part=mp2_c_g)
mp2 = C_Part.create(part='mp2', sub_part=mp2_c)
def test_data_modifying_cte_delete(self):
query = (C_Product.delete()
.where(C_Product.price < 3)
.returning(C_Product))
cte = query.cte('moved_rows')
src = Select((cte,), (cte.c.id, cte.c.name, cte.c.price))
res = (C_Archive
.insert_from(src, (C_Archive.id, C_Archive.name, C_Archive.price))
.with_cte(cte)
.execute())
self.assertEqual(len(list(res)), 3)
self.assertEqual(
sorted([(p.name, p.price) for p in C_Product.select()]),
[('p3', 3), ('p4', 4)])
self.assertEqual(
sorted([(p.name, p.price) for p in C_Archive.select()]),
[('p0', 0), ('p1', 1), ('p2', 2)])
base = (C_Part
.select(C_Part.sub_part, C_Part.part)
.where(C_Part.part == 'mp1')
.cte('included_parts', recursive=True,
columns=('sub_part', 'part')))
PA = C_Part.alias('p')
recursive = (PA
.select(PA.sub_part, PA.part)
.join(base, on=(PA.part == base.c.sub_part)))
cte = base.union_all(recursive)
sq = Select((cte,), (cte.c.part,))
res = (C_Part.delete()
.where(C_Part.part.in_(sq))
.with_cte(cte)
.execute())
self.assertEqual(sorted([p.part for p in C_Part.select()]),
['mp2', 'mp2-c', 'mp2-c-g'])
def test_data_modifying_cte_update(self):
# Populate archive table w/copy of data in product.
C_Archive.insert_from(
C_Product.select(),
(C_Product.id, C_Product.name, C_Product.price)).execute()
query = (C_Product
.update(price=C_Product.price * 2)
.returning(C_Product.id, C_Product.name, C_Product.price))
cte = query.cte('t')
sq = cte.select_from(cte.c.id, cte.c.name, cte.c.price)
self.assertEqual(sorted([(x.name, x.price) for x in sq]), [
('p0', 0), ('p1', 2), ('p2', 4), ('p3', 6), ('p4', 8)])
# Ensure changes were persisted.
self.assertEqual(sorted([(x.name, x.price) for x in C_Product]), [
('p0', 0), ('p1', 2), ('p2', 4), ('p3', 6), ('p4', 8)])
sq = Select((cte,), (cte.c.id, cte.c.price))
res = (C_Archive
.update(price=sq.c.price)
.from_(sq)
.where(C_Archive.id == sq.c.id)
.with_cte(cte)
.execute())
self.assertEqual(sorted([(x.name, x.price) for x in C_Product]), [
('p0', 0), ('p1', 4), ('p2', 8), ('p3', 12), ('p4', 16)])
self.assertEqual(sorted([(x.name, x.price) for x in C_Archive]), [
('p0', 0), ('p1', 4), ('p2', 8), ('p3', 12), ('p4', 16)])
def test_data_modifying_cte_insert(self):
query = (C_Product
.insert({'name': 'p5', 'price': 5})
.returning(C_Product.id, C_Product.name, C_Product.price))
cte = query.cte('t')
sq = cte.select_from(cte.c.id, cte.c.name, cte.c.price)
self.assertEqual([(p.name, p.price) for p in sq], [('p5', 5)])
query = (C_Product
.insert({'name': 'p6', 'price': 6})
.returning(C_Product.id, C_Product.name, C_Product.price))
cte = query.cte('t')
sq = Select((cte,), (cte.c.id, cte.c.name, cte.c.price))
res = (C_Archive
.insert_from(sq, (sq.c.id, sq.c.name, sq.c.price))
.with_cte(cte)
.execute())
self.assertEqual([(p.name, p.price) for p in C_Archive], [('p6', 6)])
self.assertEqual(sorted([(p.name, p.price) for p in C_Product]), [
('p0', 0), ('p1', 1), ('p2', 2), ('p3', 3), ('p4', 4), ('p5', 5),
('p6', 6)])
|
TestDataModifyingCTEIntegration
|
python
|
pandas-dev__pandas
|
pandas/tests/io/json/test_ujson.py
|
{
"start": 22685,
"end": 26855
}
|
class ____:
@pytest.mark.parametrize("bool_input", [True, False])
def test_bool(self, bool_input):
b = bool(bool_input)
assert ujson.ujson_loads(ujson.ujson_dumps(b)) == b
def test_bool_array(self):
bool_array = np.array(
[True, False, True, True, False, True, False, False], dtype=bool
)
output = np.array(ujson.ujson_loads(ujson.ujson_dumps(bool_array)), dtype=bool)
tm.assert_numpy_array_equal(bool_array, output)
def test_int(self, any_int_numpy_dtype):
klass = np.dtype(any_int_numpy_dtype).type
num = klass(1)
assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num
def test_int_array(self, any_int_numpy_dtype):
arr = np.arange(100, dtype=int)
arr_input = arr.astype(any_int_numpy_dtype)
arr_output = np.array(
ujson.ujson_loads(ujson.ujson_dumps(arr_input)), dtype=any_int_numpy_dtype
)
tm.assert_numpy_array_equal(arr_input, arr_output)
def test_int_max(self, any_int_numpy_dtype):
if any_int_numpy_dtype in ("int64", "uint64") and not IS64:
pytest.skip("Cannot test 64-bit integer on 32-bit platform")
klass = np.dtype(any_int_numpy_dtype).type
# uint64 max will always overflow,
# as it's encoded to signed.
if any_int_numpy_dtype == "uint64":
num = np.iinfo("int64").max
else:
num = np.iinfo(any_int_numpy_dtype).max
assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num
def test_float(self, float_numpy_dtype):
klass = np.dtype(float_numpy_dtype).type
num = klass(256.2013)
assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num
def test_float_array(self, float_numpy_dtype):
arr = np.arange(12.5, 185.72, 1.7322, dtype=float)
float_input = arr.astype(float_numpy_dtype)
float_output = np.array(
ujson.ujson_loads(ujson.ujson_dumps(float_input, double_precision=15)),
dtype=float_numpy_dtype,
)
tm.assert_almost_equal(float_input, float_output)
def test_float_max(self, float_numpy_dtype):
klass = np.dtype(float_numpy_dtype).type
num = klass(np.finfo(float_numpy_dtype).max / 10)
tm.assert_almost_equal(
klass(ujson.ujson_loads(ujson.ujson_dumps(num, double_precision=15))), num
)
def test_array_basic(self):
arr = np.arange(96)
arr = arr.reshape((2, 2, 2, 2, 3, 2))
tm.assert_numpy_array_equal(
np.array(ujson.ujson_loads(ujson.ujson_dumps(arr))), arr
)
@pytest.mark.parametrize("shape", [(10, 10), (5, 5, 4), (100, 1)])
def test_array_reshaped(self, shape):
arr = np.arange(100)
arr = arr.reshape(shape)
tm.assert_numpy_array_equal(
np.array(ujson.ujson_loads(ujson.ujson_dumps(arr))), arr
)
def test_array_list(self):
arr_list = [
"a",
[],
{},
{},
[],
42,
97.8,
["a", "b"],
{"key": "val"},
]
arr = np.array(arr_list, dtype=object)
result = np.array(ujson.ujson_loads(ujson.ujson_dumps(arr)), dtype=object)
tm.assert_numpy_array_equal(result, arr)
def test_array_float(self):
dtype = np.float32
arr = np.arange(100.202, 200.202, 1, dtype=dtype)
arr = arr.reshape((5, 5, 4))
arr_out = np.array(ujson.ujson_loads(ujson.ujson_dumps(arr)), dtype=dtype)
tm.assert_almost_equal(arr, arr_out)
def test_0d_array(self):
# gh-18878
msg = re.escape(
"array(1) (numpy-scalar) is not JSON serializable at the moment"
)
with pytest.raises(TypeError, match=msg):
ujson.ujson_dumps(np.array(1))
def test_array_long_double(self):
msg = re.compile(
"1234.5.* \\(numpy-scalar\\) is not JSON serializable at the moment"
)
with pytest.raises(TypeError, match=msg):
ujson.ujson_dumps(np.longdouble(1234.5))
|
TestNumpyJSONTests
|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/messages/base.py
|
{
"start": 12874,
"end": 16456
}
|
class ____(BaseMessage):
"""Message chunk, which can be concatenated with other Message chunks."""
def __add__(self, other: Any) -> BaseMessageChunk: # type: ignore[override]
"""Message chunks support concatenation with other message chunks.
This functionality is useful to combine message chunks yielded from
a streaming model into a complete message.
Args:
other: Another message chunk to concatenate with this one.
Returns:
A new message chunk that is the concatenation of this message chunk
and the other message chunk.
Raises:
TypeError: If the other object is not a message chunk.
For example,
`AIMessageChunk(content="Hello") + AIMessageChunk(content=" World")`
will give `AIMessageChunk(content="Hello World")`
"""
if isinstance(other, BaseMessageChunk):
# If both are (subclasses of) BaseMessageChunk,
# concat into a single BaseMessageChunk
return self.__class__(
id=self.id,
type=self.type,
content=merge_content(self.content, other.content),
additional_kwargs=merge_dicts(
self.additional_kwargs, other.additional_kwargs
),
response_metadata=merge_dicts(
self.response_metadata, other.response_metadata
),
)
if isinstance(other, list) and all(
isinstance(o, BaseMessageChunk) for o in other
):
content = merge_content(self.content, *(o.content for o in other))
additional_kwargs = merge_dicts(
self.additional_kwargs, *(o.additional_kwargs for o in other)
)
response_metadata = merge_dicts(
self.response_metadata, *(o.response_metadata for o in other)
)
return self.__class__( # type: ignore[call-arg]
id=self.id,
content=content,
additional_kwargs=additional_kwargs,
response_metadata=response_metadata,
)
msg = (
'unsupported operand type(s) for +: "'
f"{self.__class__.__name__}"
f'" and "{other.__class__.__name__}"'
)
raise TypeError(msg)
def message_to_dict(message: BaseMessage) -> dict:
"""Convert a Message to a dictionary.
Args:
message: Message to convert.
Returns:
Message as a dict. The dict will have a `type` key with the message type
and a `data` key with the message data as a dict.
"""
return {"type": message.type, "data": message.model_dump()}
def messages_to_dict(messages: Sequence[BaseMessage]) -> list[dict]:
"""Convert a sequence of Messages to a list of dictionaries.
Args:
messages: Sequence of messages (as `BaseMessage`s) to convert.
Returns:
List of messages as dicts.
"""
return [message_to_dict(m) for m in messages]
def get_msg_title_repr(title: str, *, bold: bool = False) -> str:
"""Get a title representation for a message.
Args:
title: The title.
bold: Whether to bold the title.
Returns:
The title representation.
"""
padded = " " + title + " "
sep_len = (80 - len(padded)) // 2
sep = "=" * sep_len
second_sep = sep + "=" if len(padded) % 2 else sep
if bold:
padded = get_bolded_text(padded)
return f"{sep}{padded}{second_sep}"
|
BaseMessageChunk
|
python
|
doocs__leetcode
|
solution/3100-3199/3162.Find the Number of Good Pairs I/Solution2.py
|
{
"start": 0,
"end": 403
}
|
class ____:
def numberOfPairs(self, nums1: List[int], nums2: List[int], k: int) -> int:
cnt1 = Counter(x // k for x in nums1 if x % k == 0)
if not cnt1:
return 0
cnt2 = Counter(nums2)
ans = 0
mx = max(cnt1)
for x, v in cnt2.items():
s = sum(cnt1[y] for y in range(x, mx + 1, x))
ans += s * v
return ans
|
Solution
|
python
|
plotly__plotly.py
|
plotly/graph_objs/isosurface/_stream.py
|
{
"start": 233,
"end": 3526
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "isosurface"
_path_str = "isosurface.stream"
_valid_props = {"maxpoints", "token"}
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.isosurface.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super().__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.isosurface.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.isosurface.Stream`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("maxpoints", arg, maxpoints)
self._set_property("token", arg, token)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Stream
|
python
|
pytorch__pytorch
|
torch/distributed/_tools/memory_tracker.py
|
{
"start": 419,
"end": 1262
}
|
class ____(TorchDispatchMode):
"""Run in ``TorchDispatchMode`` to get memory stats at operator level."""
def __init__(self, memory_tracker) -> None:
self.memory_tracker = memory_tracker
def __torch_dispatch__(self, func, types, args=..., kwargs=None):
rs = func(*args, **kwargs)
if func is torch.ops.aten.detach.default:
return rs
func_name: str = (
self.memory_tracker._cur_module_name
+ "."
+ func.__name__
+ "_"
+ str(self.memory_tracker._operator_names[func.__name__])
)
self.memory_tracker._operator_names[func.__name__] = (
self.memory_tracker._operator_names[func.__name__] + 1
)
self.memory_tracker._record_memory_stats(func_name)
return rs
|
MemoryProfileDispatchMode
|
python
|
getsentry__sentry
|
src/sentry/plugins/sentry_interface_types/apps.py
|
{
"start": 36,
"end": 279
}
|
class ____(AppConfig):
name = "sentry.plugins.sentry_interface_types"
def ready(self) -> None:
from sentry.plugins.base import register
from .models import InterfaceTypePlugin
register(InterfaceTypePlugin)
|
Config
|
python
|
davidhalter__jedi
|
jedi/inference/compiled/subprocess/__init__.py
|
{
"start": 4467,
"end": 8243
}
|
class ____(_InferenceStateProcess):
"""
API to functionality which will run in a subprocess.
This mediates the interaction between an `InferenceState` and the actual
execution of functionality running within a `CompiledSubprocess`. Available
functions are defined in `.functions`, though should be accessed via
attributes on this class of the same name.
This class is responsible for indicating that the `InferenceState` within
the subprocess can be removed once the corresponding instance in the parent
goes away.
"""
def __init__(
self,
inference_state: 'InferenceState',
compiled_subprocess: 'CompiledSubprocess',
) -> None:
super().__init__(inference_state)
self._used = False
self._compiled_subprocess = compiled_subprocess
# Opaque id we'll pass to the subprocess to identify the context (an
# `InferenceState`) which should be used for the request. This allows us
# to make subsequent requests which operate on results from previous
# ones, while keeping a single subprocess which can work with several
# contexts in the parent process. Once it is no longer needed(i.e: when
# this class goes away), we also use this id to indicate that the
# subprocess can discard the context.
#
# Note: this id is deliberately coupled to this class (and not to
# `InferenceState`) as this class manages access handle mappings which
# must correspond to those in the subprocess. This approach also avoids
# race conditions from successive `InferenceState`s with the same object
# id (as observed while adding support for Python 3.13).
#
# This value does not need to be the `id()` of this instance, we merely
# need to ensure that it enables the (visible) lifetime of the context
# within the subprocess to match that of this class. We therefore also
# depend on the semantics of `CompiledSubprocess.delete_inference_state`
# for correctness.
self._inference_state_id = id(self)
def __getattr__(self, name):
func = _get_function(name)
def wrapper(*args, **kwargs):
self._used = True
result = self._compiled_subprocess.run(
self._inference_state_id,
func,
args=args,
kwargs=kwargs,
)
# IMO it should be possible to create a hook in pickle.load to
# mess with the loaded objects. However it's extremely complicated
# to work around this so just do it with this call. ~ dave
return self._convert_access_handles(result)
return wrapper
def _convert_access_handles(self, obj):
if isinstance(obj, SignatureParam):
return SignatureParam(*self._convert_access_handles(tuple(obj)))
elif isinstance(obj, tuple):
return tuple(self._convert_access_handles(o) for o in obj)
elif isinstance(obj, list):
return [self._convert_access_handles(o) for o in obj]
elif isinstance(obj, AccessHandle):
try:
# Rewrite the access handle to one we're already having.
obj = self.get_access_handle(obj.id)
except KeyError:
obj.add_subprocess(self)
self.set_access_handle(obj)
elif isinstance(obj, AccessPath):
return AccessPath(self._convert_access_handles(obj.accesses))
return obj
def __del__(self):
if self._used and not self._compiled_subprocess.is_crashed:
self._compiled_subprocess.delete_inference_state(self._inference_state_id)
|
InferenceStateSubprocess
|
python
|
pikepdf__pikepdf
|
src/pikepdf/_methods.py
|
{
"start": 5713,
"end": 15752
}
|
class ____:
def _quick_save(self):
bio = BytesIO()
self.save(bio)
bio.seek(0)
return bio
def _repr_mimebundle_(self, include=None, exclude=None): # pylint: disable=unused-argument
pdf_data = self._quick_save().read()
data = {
'application/pdf': pdf_data,
}
with suppress(FileNotFoundError, RuntimeError):
data['image/svg+xml'] = _mudraw(pdf_data, 'svg').decode('utf-8')
return data
@property
def docinfo(self) -> Dictionary:
if Name.Info not in self.trailer or not isinstance(
self.trailer.Info, Dictionary
):
self.trailer.Info = self.make_indirect(Dictionary())
if not self.trailer.Info.is_indirect:
self.trailer.Info = self.make_indirect(self.trailer.Info)
return self.trailer.Info
@docinfo.setter
def docinfo(self, new_docinfo: Dictionary):
if not new_docinfo.is_indirect:
raise ValueError(
"docinfo must be an indirect object - use Pdf.make_indirect"
)
self.trailer.Info = new_docinfo
@docinfo.deleter
def docinfo(self):
if Name.Info in self.trailer:
del self.trailer.Info
def open_metadata(
self,
set_pikepdf_as_editor: bool = True,
update_docinfo: bool = True,
strict: bool = False,
) -> PdfMetadata:
return PdfMetadata(
self,
pikepdf_mark=set_pikepdf_as_editor,
sync_docinfo=update_docinfo,
overwrite_invalid_xml=not strict,
)
def open_outline(self, max_depth: int = 15, strict: bool = False) -> Outline:
return Outline(self, max_depth=max_depth, strict=strict)
def make_stream(self, data: bytes, d=None, **kwargs) -> Stream:
return Stream(self, data, d, **kwargs)
def add_blank_page(
self, *, page_size: tuple[Numeric, Numeric] = (612.0, 792.0)
) -> Page:
for dim in page_size:
if not (3 <= dim <= 14400):
raise ValueError('Page size must be between 3 and 14400 PDF units')
page_dict = Dictionary(
Type=Name.Page,
MediaBox=Array([0, 0, page_size[0], page_size[1]]),
Contents=self.make_stream(b''),
Resources=Dictionary(),
)
page_obj = self.make_indirect(page_dict)
self._add_page(page_obj, first=False)
return Page(page_obj)
def close(self) -> None:
self._close()
if getattr(self, '_tmp_stream', None):
self._tmp_stream.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
@property
def allow(self) -> Permissions:
results = {}
for field in Permissions._fields:
results[field] = getattr(self, '_allow_' + field)
return Permissions(**results)
@property
def encryption(self) -> EncryptionInfo:
return EncryptionInfo(self._encryption_data)
def check_pdf_syntax(
self, progress: Callable[[int], None] | None = None
) -> list[str]:
class DiscardingParser(StreamParser):
def __init__(self): # pylint: disable=useless-super-delegation
super().__init__() # required for C++
def handle_object(self, *_args):
pass
def handle_eof(self):
pass
problems: list[str] = []
self._decode_all_streams_and_discard(progress)
discarding_parser = DiscardingParser()
for page in self.pages:
page.parse_contents(discarding_parser)
for warning in self.get_warnings():
problems.append("WARNING: " + warning)
return problems
def save(
self,
filename_or_stream: Path | str | BinaryIO | None = None,
*,
static_id: bool = False,
preserve_pdfa: bool = True,
min_version: str | tuple[str, int] = "",
force_version: str | tuple[str, int] = "",
fix_metadata_version: bool = True,
compress_streams: bool = True,
stream_decode_level: StreamDecodeLevel | None = None,
object_stream_mode: ObjectStreamMode = ObjectStreamMode.preserve,
normalize_content: bool = False,
linearize: bool = False,
qdf: bool = False,
progress: Callable[[int], None] | None = None,
encryption: Encryption | bool | None = None,
recompress_flate: bool = False,
deterministic_id: bool = False,
) -> None:
if not filename_or_stream and getattr(self, '_original_filename', None):
filename_or_stream = self._original_filename
if not filename_or_stream:
raise ValueError(
"Cannot save to original filename because the original file was "
"not opening using Pdf.open(..., allow_overwriting_input=True). "
"Either specify a new destination filename/file stream or open "
"with allow_overwriting_input=True. If this Pdf was created using "
"Pdf.new(), you must specify a destination object since there is "
"no original filename to save to."
)
with ExitStack() as stack:
if hasattr(filename_or_stream, 'seek'):
stream = filename_or_stream
check_stream_is_usable(filename_or_stream)
else:
if not isinstance(filename_or_stream, str | bytes | Path):
raise TypeError("expected str, bytes or os.PathLike object")
filename = Path(filename_or_stream)
if (
not getattr(self, '_tmp_stream', None)
and getattr(self, '_original_filename', None) is not None
):
check_different_files(self._original_filename, filename)
stream = stack.enter_context(atomic_overwrite(filename))
self._save(
stream,
static_id=static_id,
preserve_pdfa=preserve_pdfa,
min_version=min_version,
force_version=force_version,
fix_metadata_version=fix_metadata_version,
compress_streams=compress_streams,
stream_decode_level=stream_decode_level,
object_stream_mode=object_stream_mode,
normalize_content=normalize_content,
linearize=linearize,
qdf=qdf,
progress=progress,
encryption=encryption,
samefile_check=getattr(self, '_tmp_stream', None) is None,
recompress_flate=recompress_flate,
deterministic_id=deterministic_id,
)
@staticmethod
def open(
filename_or_stream: Path | str | BinaryIO,
*,
password: str | bytes = "",
hex_password: bool = False,
ignore_xref_streams: bool = False,
suppress_warnings: bool = True,
attempt_recovery: bool = True,
inherit_page_attributes: bool = True,
access_mode: AccessMode = AccessMode.default,
allow_overwriting_input: bool = False,
) -> Pdf:
if isinstance(filename_or_stream, bytes) and filename_or_stream.startswith(
b'%PDF-'
):
warn(
"It looks like you called with Pdf.open(data) with a bytes-like object "
"containing a PDF. This will probably fail because this function "
"expects a filename or opened file-like object. Instead, please use "
"Pdf.open(BytesIO(data))."
)
if isinstance(filename_or_stream, int | float):
# Attempted to open with integer file descriptor?
# TODO improve error
raise TypeError("expected str, bytes or os.PathLike object")
stream: RawIOBase | None = None
closing_stream: bool = False
original_filename: Path | None = None
if allow_overwriting_input:
try:
Path(filename_or_stream)
except TypeError as error:
raise ValueError(
'"allow_overwriting_input=True" requires "open" first argument '
'to be a file path'
) from error
original_filename = Path(filename_or_stream)
with open(original_filename, 'rb') as pdf_file:
stream = BytesIO()
shutil.copyfileobj(pdf_file, stream)
stream.seek(0)
# description = f"memory copy of {original_filename}"
description = str(original_filename)
elif hasattr(filename_or_stream, 'read') and hasattr(
filename_or_stream, 'seek'
):
stream = filename_or_stream
description = f"stream {stream}"
else:
stream = open(filename_or_stream, 'rb')
original_filename = Path(filename_or_stream)
description = str(filename_or_stream)
closing_stream = True
try:
check_stream_is_usable(stream)
pdf = Pdf._open(
stream,
password=password,
hex_password=hex_password,
ignore_xref_streams=ignore_xref_streams,
suppress_warnings=suppress_warnings,
attempt_recovery=attempt_recovery,
inherit_page_attributes=inherit_page_attributes,
access_mode=access_mode,
description=description,
closing_stream=closing_stream,
)
except Exception:
if stream is not None and closing_stream:
stream.close()
raise
pdf._tmp_stream = stream if allow_overwriting_input else None
pdf._original_filename = original_filename
return pdf
@augments(_ObjectMapping)
|
Extend_Pdf
|
python
|
openai__openai-python
|
src/openai/types/responses/tool_choice_mcp.py
|
{
"start": 218,
"end": 481
}
|
class ____(BaseModel):
server_label: str
"""The label of the MCP server to use."""
type: Literal["mcp"]
"""For MCP tools, the type is always `mcp`."""
name: Optional[str] = None
"""The name of the tool to call on the server."""
|
ToolChoiceMcp
|
python
|
django-haystack__django-haystack
|
test_haystack/solr_tests/test_solr_backend.py
|
{
"start": 6005,
"end": 6309
}
|
class ____(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
def get_model(self):
return MockModel
def prepare_text(self, obj):
return """Don't panic but %s has been iñtërnâtiônàlizéð""" % obj.author
|
SolrQuotingMockSearchIndex
|
python
|
getsentry__sentry
|
src/sentry/releases/endpoints/project_release_file_details.py
|
{
"start": 1177,
"end": 1794
}
|
class ____(serializers.Serializer):
name = serializers.CharField(max_length=200, required=True)
def _entry_from_index(release: Release, dist: Distribution | None, url: str) -> ReleaseFile:
index = read_artifact_index(release, dist)
if index is None:
raise ResourceDoesNotExist
try:
return index.get("files", {})[url]
except KeyError:
raise ResourceDoesNotExist
def _get_from_index(release: Release, dist: Distribution | None, url: str) -> ReleaseFile:
entry = _entry_from_index(release, dist, url)
return pseudo_releasefile(url, entry, dist)
|
ReleaseFileSerializer
|
python
|
django__django
|
tests/admin_views/admin.py
|
{
"start": 22929,
"end": 23181
}
|
class ____(admin.TabularInline):
model = RelatedPrepopulated
extra = 1
autocomplete_fields = ["fk", "m2m"]
prepopulated_fields = {
"slug1": ["name", "pubdate"],
"slug2": ["status", "name"],
}
|
RelatedPrepopulatedInline2
|
python
|
kamyu104__LeetCode-Solutions
|
Python/linked-list-cycle-ii.py
|
{
"start": 29,
"end": 247
}
|
class ____(object):
def __init__(self, x):
self.val = x
self.next = None
def __str__(self):
if self:
return "{}".format(self.val)
else:
return None
|
ListNode
|
python
|
Pylons__pyramid
|
src/pyramid/config/routes.py
|
{
"start": 525,
"end": 24317
}
|
class ____:
@action_method
def add_route(
self,
name,
pattern=None,
factory=None,
for_=None,
header=None,
xhr=None,
accept=None,
path_info=None,
request_method=None,
request_param=None,
traverse=None,
custom_predicates=(),
use_global_views=False,
path=None,
pregenerator=None,
static=False,
inherit_slash=None,
**predicates,
):
"""Add a :term:`route configuration` to the current configuration
state. Arguments to ``add_route`` are divided into *predicate*
and *non-predicate* types. :term:`Route predicate` arguments
narrow the circumstances in which a route will match a
request; non-predicate arguments are informational.
Non-Predicate Arguments
name
The name of the route, e.g. ``myroute``. This attribute is
required. It must be unique among all defined routes in a given
application.
factory
A Python object (often a function or a class) or a :term:`dotted
Python name` which refers to the same object that will generate a
:app:`Pyramid` root resource object when this route matches. For
example, ``mypackage.resources.MyFactory``. If this argument is
not specified, a default root factory will be used. See
:ref:`the_resource_tree` for more information about root factories.
traverse
If you would like to cause the :term:`context` to be
something other than the :term:`root` object when this route
matches, you can spell a traversal pattern as the
``traverse`` argument. This traversal pattern will be used
as the traversal path: traversal will begin at the root
object implied by this route (either the global root, or the
object returned by the ``factory`` associated with this
route).
The syntax of the ``traverse`` argument is the same as it is
for ``pattern``. For example, if the ``pattern`` provided to
``add_route`` is ``articles/{article}/edit``, and the
``traverse`` argument provided to ``add_route`` is
``/{article}``, when a request comes in that causes the route
to match in such a way that the ``article`` match value is
``'1'`` (when the request URI is ``/articles/1/edit``), the
traversal path will be generated as ``/1``. This means that
the root object's ``__getitem__`` will be called with the
name ``'1'`` during the traversal phase. If the ``'1'`` object
exists, it will become the :term:`context` of the request.
:ref:`traversal_chapter` has more information about
traversal.
If the traversal path contains segment marker names which
are not present in the ``pattern`` argument, a runtime error
will occur. The ``traverse`` pattern should not contain
segment markers that do not exist in the ``pattern``
argument.
A similar combining of routing and traversal is available
when a route is matched which contains a ``*traverse``
remainder marker in its pattern (see
:ref:`using_traverse_in_a_route_pattern`). The ``traverse``
argument to add_route allows you to associate route patterns
with an arbitrary traversal path without using a
``*traverse`` remainder marker; instead you can use other
match information.
Note that the ``traverse`` argument to ``add_route`` is
ignored when attached to a route that has a ``*traverse``
remainder marker in its pattern.
pregenerator
This option should be a callable object that implements the
:class:`pyramid.interfaces.IRoutePregenerator` interface. A
:term:`pregenerator` is a callable called by the
:meth:`pyramid.request.Request.route_url` function to augment or
replace the arguments it is passed when generating a URL for the
route. This is a feature not often used directly by applications,
it is meant to be hooked by frameworks that use :app:`Pyramid` as
a base.
use_global_views
When a request matches this route, and view lookup cannot
find a view which has a ``route_name`` predicate argument
that matches the route, try to fall back to using a view
that otherwise matches the context, request, and view name
(but which does not match the route_name predicate).
static
If ``static`` is ``True``, this route will never match an incoming
request; it will only be useful for URL generation. By default,
``static`` is ``False``. See :ref:`static_route_narr`.
.. versionadded:: 1.1
inherit_slash
This argument can only be used when the ``pattern`` is an empty
string (``''``). By default, the composed route pattern will always
include a trailing slash, but this argument provides a way to
opt-out if both, you (the developer invoking ``add_route``) and the
integrator (the developer setting the :term:`route prefix`),
agree that the pattern should not contain a trailing slash.
For example:
.. code-block:: python
with config.route_prefix_context('/users'):
config.add_route('users', '', inherit_slash=True)
In this example, the resulting route pattern will be ``/users``.
Alternatively, if the route prefix were ``/users/``, then the
resulting route pattern would be ``/users/``.
.. versionadded:: 2.0
Predicate Arguments
pattern
The pattern of the route e.g. ``ideas/{idea}``. This
argument is required. See :ref:`route_pattern_syntax`
for information about the syntax of route patterns. If the
pattern doesn't match the current URL, route matching
continues.
.. note::
For backwards compatibility purposes (as of :app:`Pyramid` 1.0), a
``path`` keyword argument passed to this function will be used to
represent the pattern value if the ``pattern`` argument is
``None``. If both ``path`` and ``pattern`` are passed,
``pattern`` wins.
xhr
This value should be either ``True`` or ``False``. If this
value is specified and is ``True``, the :term:`request` must
possess an ``HTTP_X_REQUESTED_WITH`` (aka
``X-Requested-With``) header for this route to match. This
is useful for detecting AJAX requests issued from jQuery,
Prototype and other Javascript libraries. If this predicate
returns ``False``, route matching continues.
request_method
A string representing an HTTP method name, e.g. ``GET``, ``POST``,
``HEAD``, ``DELETE``, ``PUT`` or a tuple of elements containing
HTTP method names. If this argument is not specified, this route
will match if the request has *any* request method. If this
predicate returns ``False``, route matching continues.
.. versionchanged:: 1.2
The ability to pass a tuple of items as ``request_method``.
Previous versions allowed only a string.
path_info
This value represents a regular expression pattern that will
be tested against the ``PATH_INFO`` WSGI environment
variable. If the regex matches, this predicate will return
``True``. If this predicate returns ``False``, route
matching continues.
request_param
This value can be any string or an iterable of strings. A view
declaration with this argument ensures that the associated route will
only match when the request has a key in the ``request.params``
dictionary (an HTTP ``GET`` or ``POST`` variable) that has a
name which matches the supplied value. If the value
supplied as the argument has a ``=`` sign in it,
e.g. ``request_param="foo=123"``, then both the key
(``foo``) must exist in the ``request.params`` dictionary, and
the value must match the right hand side of the expression (``123``)
for the route to "match" the current request. If this predicate
returns ``False``, route matching continues.
header
This argument can be a string or an iterable of strings for HTTP
headers. The matching is determined as follow:
- If a string does not contain a ``:`` (colon), it will be
considered to be the header name (example ``If-Modified-Since``).
In this case, the header specified by the name must be present
in the request for this string to match. Case is not significant.
- If a string contains a colon, it will be considered a
name/value pair (for example ``User-Agent:Mozilla/.*`` or
``Host:localhost``), where the value part is a regular
expression. The header specified by the name must be present
in the request *and* the regular expression specified as the
value part must match the value of the request header. Case is
not significant for the header name, but it is for the value.
All strings must be matched for this predicate to return ``True``.
If this predicate returns ``False``, route matching continues.
accept
A :term:`media type` that will be matched against the ``Accept``
HTTP request header. If this value is specified, it may be a
specific media type such as ``text/html``, or a list of the same.
If the media type is acceptable by the ``Accept`` header of the
request, or if the ``Accept`` header isn't set at all in the request,
this predicate will match. If this does not match the ``Accept``
header of the request, route matching continues.
If ``accept`` is not specified, the ``HTTP_ACCEPT`` HTTP header is
not taken into consideration when deciding whether or not to select
the route.
Unlike the ``accept`` argument to
:meth:`pyramid.config.Configurator.add_view`, this value is
strictly a predicate and supports :func:`pyramid.config.not_`.
.. versionchanged:: 1.10
Specifying a media range is deprecated due to changes in WebOb
and ambiguities that occur when trying to match ranges against
ranges in the ``Accept`` header. Support will be removed in
:app:`Pyramid` 2.0. Use a list of specific media types to match
more than one type.
.. versionchanged:: 2.0
Removed support for media ranges.
is_authenticated
This value, if specified, must be either ``True`` or ``False``.
If it is specified and ``True``, only a request from an authenticated
user, as determined by the :term:`security policy` in use, will
satisfy the predicate.
If it is specified and ``False``, only a request from a user who is
not authenticated will satisfy the predicate.
.. versionadded:: 2.0
effective_principals
If specified, this value should be a :term:`principal` identifier or
a sequence of principal identifiers. If the
:attr:`pyramid.request.Request.effective_principals` property
indicates that every principal named in the argument list is present
in the current request, this predicate will return True; otherwise it
will return False. For example:
``effective_principals=pyramid.authorization.Authenticated`` or
``effective_principals=('fred', 'group:admins')``.
.. versionadded:: 1.4a4
.. deprecated:: 2.0
Use ``is_authenticated`` or a custom predicate.
custom_predicates
.. deprecated:: 1.5
This value should be a sequence of references to custom
predicate callables. Use custom predicates when no set of
predefined predicates does what you need. Custom predicates
can be combined with predefined predicates as necessary.
Each custom predicate callable should accept two arguments:
``info`` and ``request`` and should return either ``True``
or ``False`` after doing arbitrary evaluation of the info
and/or the request. If all custom and non-custom predicate
callables return ``True`` the associated route will be
considered viable for a given request. If any predicate
callable returns ``False``, route matching continues. Note
that the value ``info`` passed to a custom route predicate
is a dictionary containing matching information; see
:ref:`custom_route_predicates` for more information about
``info``.
\\*\\*predicates
Pass extra keyword parameters to use custom predicates registered via
:meth:`pyramid.config.Configurator.add_route_predicate`. More than
one custom predicate can be used at the same time. See
:ref:`view_and_route_predicates` for more information about
custom predicates.
.. versionadded:: 1.4
"""
if custom_predicates:
warnings.warn(
(
'The "custom_predicates" argument to '
'Configurator.add_route is deprecated as of Pyramid 1.5. '
'Use "config.add_route_predicate" and use the registered '
'route predicate as a predicate argument to add_route '
'instead. See "Adding A Custom View, Route, or '
'Subscriber Predicate" in the "Hooks" chapter of the '
'documentation for more information.'
),
DeprecationWarning,
stacklevel=3,
)
if 'effective_principals' in predicates:
warnings.warn(
(
'The new security policy has deprecated '
'effective_principals. See "Upgrading '
'Authentication/Authorization" in "What\'s New in '
'Pyramid 2.0" of the documentation for more information.'
),
DeprecationWarning,
stacklevel=3,
)
if accept is not None:
if not is_nonstr_iter(accept):
accept = [accept]
accept = [
normalize_accept_offer(accept_option)
for accept_option in accept
]
# these are route predicates; if they do not match, the next route
# in the routelist will be tried
if request_method is not None:
request_method = as_sorted_tuple(request_method)
factory = self.maybe_dotted(factory)
if pattern is None:
pattern = path
if pattern is None:
raise ConfigurationError('"pattern" argument may not be None')
if inherit_slash and pattern != '':
raise ConfigurationError(
'"inherit_slash" may only be used with an empty pattern'
)
# check for an external route; an external route is one which is
# is a full url (e.g. 'http://example.com/{id}')
parsed = urlparse(pattern)
external_url = pattern
if parsed.hostname:
pattern = parsed.path
original_pregenerator = pregenerator
def external_url_pregenerator(request, elements, kw):
if '_app_url' in kw:
raise ValueError(
'You cannot generate a path to an external route '
'pattern via request.route_path nor pass an _app_url '
'to request.route_url when generating a URL for an '
'external route pattern (pattern was "%s") '
% (pattern,)
)
if '_scheme' in kw:
scheme = kw['_scheme']
elif parsed.scheme:
scheme = parsed.scheme
else:
scheme = request.scheme
kw['_app_url'] = f'{scheme}://{parsed.netloc}'
if original_pregenerator:
elements, kw = original_pregenerator(request, elements, kw)
return elements, kw
pregenerator = external_url_pregenerator
static = True
elif self.route_prefix:
if pattern == '' and inherit_slash:
pattern = self.route_prefix
else:
pattern = (
self.route_prefix.rstrip('/') + '/' + pattern.lstrip('/')
)
mapper = self.get_routes_mapper()
introspectables = []
intr = self.introspectable(
'routes', name, f'{name} (pattern: {pattern!r})', 'route'
)
intr['name'] = name
intr['pattern'] = pattern
intr['factory'] = factory
intr['xhr'] = xhr
intr['request_methods'] = request_method
intr['path_info'] = path_info
intr['request_param'] = request_param
intr['header'] = header
intr['accept'] = accept
intr['traverse'] = traverse
intr['custom_predicates'] = custom_predicates
intr['pregenerator'] = pregenerator
intr['static'] = static
intr['use_global_views'] = use_global_views
if static is True:
intr['external_url'] = external_url
introspectables.append(intr)
if factory:
factory_intr = self.introspectable(
'root factories',
name,
self.object_description(factory),
'root factory',
)
factory_intr['factory'] = factory
factory_intr['route_name'] = name
factory_intr.relate('routes', name)
introspectables.append(factory_intr)
def register_route_request_iface():
request_iface = self.registry.queryUtility(
IRouteRequest, name=name
)
if request_iface is None:
if use_global_views:
bases = (IRequest,)
else:
bases = ()
request_iface = route_request_iface(name, bases)
self.registry.registerUtility(
request_iface, IRouteRequest, name=name
)
def register_connect():
pvals = predicates.copy()
pvals.update(
dict(
xhr=xhr,
request_method=request_method,
path_info=path_info,
request_param=request_param,
header=header,
accept=accept,
traverse=traverse,
custom=predvalseq(custom_predicates),
)
)
predlist = self.get_predlist('route')
_, preds, _ = predlist.make(self, **pvals)
route = mapper.connect(
name,
pattern,
factory,
predicates=preds,
pregenerator=pregenerator,
static=static,
)
intr['object'] = route
return route
# We have to connect routes in the order they were provided;
# we can't use a phase to do that, because when the actions are
# sorted, actions in the same phase lose relative ordering
self.action(('route-connect', name), register_connect)
# But IRouteRequest interfaces must be registered before we begin to
# process view registrations (in phase 3)
self.action(
('route', name),
register_route_request_iface,
order=PHASE2_CONFIG,
introspectables=introspectables,
)
@action_method
def add_route_predicate(
self, name, factory, weighs_more_than=None, weighs_less_than=None
):
"""Adds a route predicate factory. The view predicate can later be
named as a keyword argument to
:meth:`pyramid.config.Configurator.add_route`.
``name`` should be the name of the predicate. It must be a valid
Python identifier (it will be used as a keyword argument to
``add_route``).
``factory`` should be a :term:`predicate factory` or :term:`dotted
Python name` which refers to a predicate factory.
See :ref:`view_and_route_predicates` for more information.
.. versionadded:: 1.4
"""
self._add_predicate(
'route',
name,
factory,
weighs_more_than=weighs_more_than,
weighs_less_than=weighs_less_than,
)
def add_default_route_predicates(self):
p = pyramid.predicates
for name, factory in (
('xhr', p.XHRPredicate),
('request_method', p.RequestMethodPredicate),
('path_info', p.PathInfoPredicate),
('request_param', p.RequestParamPredicate),
('header', p.HeaderPredicate),
('accept', p.AcceptPredicate),
('is_authenticated', p.IsAuthenticatedPredicate),
('effective_principals', p.EffectivePrincipalsPredicate),
('custom', p.CustomPredicate),
('traverse', p.TraversePredicate),
):
self.add_route_predicate(name, factory)
def get_routes_mapper(self):
"""Return the :term:`routes mapper` object associated with
this configurator's :term:`registry`."""
mapper = self.registry.queryUtility(IRoutesMapper)
if mapper is None:
mapper = RoutesMapper()
self.registry.registerUtility(mapper, IRoutesMapper)
return mapper
@contextlib.contextmanager
def route_prefix_context(self, route_prefix):
"""
Return this configurator with a :term:`route prefix` temporarily set.
When the context exits, the ``route_prefix`` is reset to the original.
``route_prefix`` is a string suitable to be used as a route prefix,
or ``None``.
Example Usage:
.. code-block:: python
config = Configurator()
with config.route_prefix_context('foo'):
config.add_route('bar', '/bar')
.. versionadded:: 1.10
"""
original_route_prefix = self.route_prefix
if route_prefix is None:
route_prefix = ''
old_route_prefix = self.route_prefix
if old_route_prefix is None:
old_route_prefix = ''
route_prefix = '{}/{}'.format(
old_route_prefix.rstrip('/'), route_prefix.lstrip('/')
)
route_prefix = route_prefix.strip('/')
if not route_prefix:
route_prefix = None
self.begin()
try:
self.route_prefix = route_prefix
yield
finally:
self.route_prefix = original_route_prefix
self.end()
|
RoutesConfiguratorMixin
|
python
|
ray-project__ray
|
python/ray/train/v2/horovod/horovod_trainer.py
|
{
"start": 368,
"end": 1442
}
|
class ____(DataParallelTrainer):
"""A Trainer for data parallel Horovod training. HorovodTrainer is deprecated."""
def __init__(
self,
train_loop_per_worker: Union[Callable[[], None], Callable[[Dict], None]],
*,
train_loop_config: Optional[Dict] = None,
horovod_config: Optional[HorovodConfig] = None,
scaling_config: Optional[ScalingConfig] = None,
dataset_config: Optional[DataConfig] = None,
run_config: Optional[RunConfig] = None,
datasets: Optional[Dict[str, GenDataset]] = None,
metadata: Optional[Dict[str, Any]] = None,
resume_from_checkpoint: Optional[Checkpoint] = None,
):
raise DeprecationWarning(
"`HorovodTrainer` is not supported and is scheduled to be removed "
"in the future. "
"Please consider using `TorchTrainer` or `TensorflowTrainer`, "
"fall back to the old implementation with `RAY_TRAIN_V2_ENABLED=0`, "
"or file an issue on Github describing your use case."
)
|
HorovodTrainer
|
python
|
wandb__wandb
|
wandb/sdk/artifacts/_generated/delete_artifact_sequence.py
|
{
"start": 300,
"end": 394
}
|
class ____(GQLResult):
result: Optional[DeleteArtifactSequenceResult]
|
DeleteArtifactSequence
|
python
|
facebook__pyre-check
|
client/language_server/connections.py
|
{
"start": 5816,
"end": 6466
}
|
class ____:
"""
An adapter for `AsyncBytesWriter` that encodes everything it writes immediately
from string to bytes. In other words, it tries to expose the same interfaces
as `AsyncBytesWriter` except it operates on strings rather than bytestrings.
"""
bytes_writer: AsyncBytesWriter
encoding: str
def __init__(self, bytes_writer: AsyncBytesWriter, encoding: str = "utf-8") -> None:
self.bytes_writer = bytes_writer
self.encoding = encoding
async def write(self, data: str) -> None:
data_bytes = data.encode(self.encoding)
await self.bytes_writer.write(data_bytes)
|
AsyncTextWriter
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/twitch/provider.py
|
{
"start": 789,
"end": 1318
}
|
class ____(OAuth2Provider):
id = "twitch"
name = "Twitch"
account_class = TwitchAccount
oauth2_adapter_class = TwitchOAuth2Adapter
def extract_uid(self, data):
return str(data["id"])
def extract_common_fields(self, data):
return {
"username": data.get("login"),
"name": data.get("display_name"),
"email": data.get("email"),
}
def get_default_scope(self):
return ["user:read:email"]
provider_classes = [TwitchProvider]
|
TwitchProvider
|
python
|
Textualize__textual
|
docs/examples/app/question02.py
|
{
"start": 87,
"end": 551
}
|
class ____(App[str]):
CSS_PATH = "question02.tcss"
def compose(self) -> ComposeResult:
yield Label("Do you love Textual?", id="question")
yield Button("Yes", id="yes", variant="primary")
yield Button("No", id="no", variant="error")
def on_button_pressed(self, event: Button.Pressed) -> None:
self.exit(event.button.id)
if __name__ == "__main__":
app = QuestionApp()
reply = app.run()
print(reply)
|
QuestionApp
|
python
|
PyCQA__pylint
|
tests/functional/u/use/use_maxsplit_arg.py
|
{
"start": 2491,
"end": 3449
}
|
class ____():
split = '1,2,3'
# Error message should show Bar.split.split(',', maxsplit=1) or Bar.split.rsplit(',', maxsplit=1)
print(Bar.split.split(",")[0]) # [use-maxsplit-arg]
print(Bar.split.split(",")[-1]) # [use-maxsplit-arg]
print(Bar.split.rsplit(",")[0]) # [use-maxsplit-arg]
print(Bar.split.rsplit(",")[-1]) # [use-maxsplit-arg]
# Special cases
a = "1,2,3".split('\n')[0] # [use-maxsplit-arg]
a = "1,2,3".split('split')[-1] # [use-maxsplit-arg]
a = "1,2,3".rsplit('rsplit')[0] # [use-maxsplit-arg]
# Test cases for false-positive reported in #4664
# https://github.com/pylint-dev/pylint/issues/4664
source = 'A.B.C.D.E.F.G'
i = 0
for j in range(5):
print(source.split('.')[i])
i = i + 1
sepNone = {"sep": None}
# Test for crash when sep is given by keyword
# https://github.com/pylint-dev/pylint/issues/5737
get_last = SEQ.split(sep=None)[-1] # [use-maxsplit-arg]
get_last = SEQ.split(**sepNone)[-1] # [use-maxsplit-arg]
|
Bar
|
python
|
sphinx-doc__sphinx
|
tests/test_builders/test_build_linkcheck.py
|
{
"start": 34733,
"end": 41595
}
|
class ____(InfiniteRedirectOnHeadHandler):
protocol_version = 'HTTP/1.1'
def do_GET(self) -> None:
self.send_response(301, 'Found')
if self.path == '/':
self.send_header('Location', '/local')
elif self.path == '/local':
self.send_header('Location', 'http://example.test/migrated')
self.send_header('Content-Length', '0')
self.end_headers()
@pytest.mark.sphinx('linkcheck', testroot='linkcheck-localserver')
def test_ignore_remote_redirection(app: SphinxTestApp) -> None:
with serve_application(app, RemoteDomainRedirectHandler) as address:
app.config.linkcheck_ignore = ['http://example.test']
app.build()
with open(app.outdir / 'output.json', encoding='utf-8') as fp:
content = json.load(fp)
assert content == {
'code': 301,
'status': 'ignored',
'filename': 'index.rst',
'lineno': 1,
'uri': f'http://{address}/',
'info': 'ignored redirect: http://example.test/migrated',
}
def make_retry_after_handler(
responses: list[tuple[int, str | None]],
) -> type[BaseHTTPRequestHandler]:
class RetryAfterHandler(BaseHTTPRequestHandler):
protocol_version = 'HTTP/1.1'
def do_HEAD(self) -> None:
status, retry_after = responses.pop(0)
self.send_response(status)
if retry_after:
self.send_header('Retry-After', retry_after)
self.send_header('Content-Length', '0')
self.end_headers()
def log_date_time_string(self) -> str:
"""Strip date and time from logged messages for assertions."""
return ''
return RetryAfterHandler
@pytest.mark.sphinx(
'linkcheck',
testroot='linkcheck-localserver',
freshenv=True,
)
def test_too_many_requests_retry_after_int_delay(
app: SphinxTestApp, capsys: pytest.CaptureFixture[str]
) -> None:
with (
serve_application(
app, make_retry_after_handler([(429, '0'), (200, None)])
) as address,
mock.patch('sphinx.builders.linkcheck.DEFAULT_DELAY', 0),
mock.patch('sphinx.builders.linkcheck.QUEUE_POLL_SECS', 0.01),
):
app.build()
content = (app.outdir / 'output.json').read_text(encoding='utf8')
assert json.loads(content) == {
'filename': 'index.rst',
'lineno': 1,
'status': 'working',
'code': 0,
'uri': f'http://{address}/',
'info': '',
}
rate_limit_log = f'-rate limited- http://{address}/ | sleeping...\n'
assert rate_limit_log in strip_escape_sequences(app.status.getvalue())
_stdout, stderr = capsys.readouterr()
assert stderr == textwrap.dedent(
"""\
127.0.0.1 - - [] "HEAD / HTTP/1.1" 429 -
127.0.0.1 - - [] "HEAD / HTTP/1.1" 200 -
""",
)
@pytest.mark.parametrize('tz', [None, 'GMT', 'GMT+3', 'GMT-3'])
@pytest.mark.sphinx(
'linkcheck',
testroot='linkcheck-localserver',
freshenv=True,
)
def test_too_many_requests_retry_after_HTTP_date(
tz: str | None,
app: SphinxTestApp,
monkeypatch: pytest.MonkeyPatch,
capsys: pytest.CaptureFixture[str],
) -> None:
retry_after = wsgiref.handlers.format_date_time(time.time())
with monkeypatch.context() as m:
if tz is not None:
m.setenv('TZ', tz)
if sys.platform != 'win32':
time.tzset()
m.setattr(
sphinx.util.http_date, '_GMT_OFFSET', float(time.localtime().tm_gmtoff)
)
with serve_application(
app, make_retry_after_handler([(429, retry_after), (200, None)])
) as address:
app.build()
# Undo side-effects: the monkeypatch context manager clears the TZ environment
# variable, but we also need to reset Python's internal notion of the current
# timezone.
if sys.platform != 'win32':
time.tzset()
content = (app.outdir / 'output.json').read_text(encoding='utf8')
assert json.loads(content) == {
'filename': 'index.rst',
'lineno': 1,
'status': 'working',
'code': 0,
'uri': f'http://{address}/',
'info': '',
}
_stdout, stderr = capsys.readouterr()
assert stderr == textwrap.dedent(
"""\
127.0.0.1 - - [] "HEAD / HTTP/1.1" 429 -
127.0.0.1 - - [] "HEAD / HTTP/1.1" 200 -
""",
)
@pytest.mark.sphinx(
'linkcheck',
testroot='linkcheck-localserver',
freshenv=True,
)
def test_too_many_requests_retry_after_without_header(
app: SphinxTestApp, capsys: pytest.CaptureFixture[str]
) -> None:
with (
serve_application(
app, make_retry_after_handler([(429, None), (200, None)])
) as address,
mock.patch('sphinx.builders.linkcheck.DEFAULT_DELAY', 0),
):
app.build()
content = (app.outdir / 'output.json').read_text(encoding='utf8')
assert json.loads(content) == {
'filename': 'index.rst',
'lineno': 1,
'status': 'working',
'code': 0,
'uri': f'http://{address}/',
'info': '',
}
_stdout, stderr = capsys.readouterr()
assert stderr == textwrap.dedent(
"""\
127.0.0.1 - - [] "HEAD / HTTP/1.1" 429 -
127.0.0.1 - - [] "HEAD / HTTP/1.1" 200 -
""",
)
@pytest.mark.sphinx(
'linkcheck',
testroot='linkcheck-localserver',
freshenv=True,
confoverrides={
'linkcheck_report_timeouts_as_broken': False,
'linkcheck_timeout': 0.01,
},
)
def test_requests_timeout(app: SphinxTestApp) -> None:
class DelayedResponseHandler(BaseHTTPRequestHandler):
protocol_version = 'HTTP/1.1'
def do_GET(self) -> None:
time.sleep(0.2) # wait before sending any response data
self.send_response(200, 'OK')
self.send_header('Content-Length', '0')
self.end_headers()
with serve_application(app, DelayedResponseHandler):
app.build()
with open(app.outdir / 'output.json', encoding='utf-8') as fp:
content = json.load(fp)
assert content['status'] == 'timeout'
@pytest.mark.sphinx(
'linkcheck',
testroot='linkcheck-localserver',
freshenv=True,
confoverrides={'linkcheck_rate_limit_timeout': 0.0},
)
def test_too_many_requests_user_timeout(app: SphinxTestApp) -> None:
with serve_application(app, make_retry_after_handler([(429, None)])) as address:
app.build()
content = (app.outdir / 'output.json').read_text(encoding='utf8')
assert json.loads(content) == {
'filename': 'index.rst',
'lineno': 1,
'status': 'broken',
'code': 0,
'uri': f'http://{address}/',
'info': f'429 Client Error: Too Many Requests for url: http://{address}/',
}
|
RemoteDomainRedirectHandler
|
python
|
plotly__plotly.py
|
plotly/graph_objs/scatterternary/marker/_line.py
|
{
"start": 233,
"end": 20964
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterternary.marker"
_path_str = "scatterternary.marker.line"
_valid_props = {
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorscale",
"colorsrc",
"reversescale",
"width",
"widthsrc",
}
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.line.colorscale`. Has an effect only if in
`marker.line.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is true, the
default palette will be chosen according to whether numbers in
the `color` array are all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.line.color`) or the
bounds set in `marker.line.cmin` and `marker.line.cmax` Has an
effect only if in `marker.line.color` is set to a numerical
array. Defaults to `false` when `marker.line.cmin` and
`marker.line.cmax` are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
in `marker.line.color` is set to a numerical array. Value
should have the same units as in `marker.line.color` and if
set, `marker.line.cmin` must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling
`marker.line.cmin` and/or `marker.line.cmax` to be equidistant
to this point. Has an effect only if in `marker.line.color` is
set to a numerical array. Value should have the same units as
in `marker.line.color`. Has no effect when `marker.line.cauto`
is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
in `marker.line.color` is set to a numerical array. Value
should have the same units as in `marker.line.color` and if
set, `marker.line.cmax` must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
@property
def color(self):
"""
Sets the marker.line color. It accepts either a specific color
or an array of numbers that are mapped to the colorscale
relative to the max and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A number that will be interpreted as a color
according to scatterternary.marker.line.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def coloraxis(self):
    """
    Reference to a shared color axis ("coloraxis", "coloraxis2",
    "coloraxis3", ...). Settings for these shared color axes live in
    the layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
    Multiple color scales may be linked to the same color axis.

    The 'coloraxis' property is an identifier of a particular subplot,
    of type 'coloraxis': the string 'coloraxis' optionally followed by
    an integer >= 1 (e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', ...).

    Returns
    -------
    str
    """
    return self["coloraxis"]

@coloraxis.setter
def coloraxis(self, val):
    # Store through the underlying plotly property map.
    self["coloraxis"] = val
@property
def colorscale(self):
    """
    The colorscale. Only meaningful when `marker.line.color` is a
    numerical array. The colorscale must be an array of arrays mapping
    a normalized value to an rgb, rgba, hex, hsl, hsv, or named color
    string; at minimum the lowest (0) and highest (1) values are
    required, e.g. `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. Bound
    the mapped color space via `marker.line.cmin` and
    `marker.line.cmax`. Alternatively, a palette name string from:
    Blackbody,Bluered,Blues,Cividis,Earth,Electric,Greens,Greys,Hot,
    Jet,Picnic,Portland,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.

    The 'colorscale' property is a colorscale and may be specified as:
      - A list of colors spaced evenly to create the colorscale (many
        predefined colorscale lists are included in the sequential,
        diverging, and cyclical modules of the plotly.colors package)
      - A list of 2-element lists where the first element is the
        normalized color level value (starting at 0 and ending at 1)
        and the second item is a valid color string
        (e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
      - One of the named colorscales:
        ['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
        'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
        'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
        'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
        'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
        'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
        'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
        'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
        'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
        'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
        'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
        'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
        'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
        'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
        'ylorrd'].
        Appending '_r' to a named colorscale reverses it.

    Returns
    -------
    str
    """
    return self["colorscale"]

@colorscale.setter
def colorscale(self, val):
    # Store through the underlying plotly property map.
    self["colorscale"] = val
@property
def colorsrc(self):
    """
    Source reference on Chart Studio Cloud for `color`.

    The 'colorsrc' property must be specified as a string or as a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["colorsrc"]

@colorsrc.setter
def colorsrc(self, val):
    # Store through the underlying plotly property map.
    self["colorsrc"] = val
@property
def reversescale(self):
    """
    Reverses the color mapping when true: `marker.line.cmin` then
    corresponds to the last color in the array and `marker.line.cmax`
    to the first. Only meaningful when `marker.line.color` is a
    numerical array.

    The 'reversescale' property must be specified as a bool
    (either True, or False).

    Returns
    -------
    bool
    """
    return self["reversescale"]

@reversescale.setter
def reversescale(self, val):
    # Store through the underlying plotly property map.
    self["reversescale"] = val
@property
def width(self):
    """
    Width (in px) of the lines bounding the marker points.

    The 'width' property is a number and may be specified as:
      - An int or float in the interval [0, inf]
      - A tuple, list, or one-dimensional numpy array of the above

    Returns
    -------
    int|float|numpy.ndarray
    """
    return self["width"]

@width.setter
def width(self, val):
    # Store through the underlying plotly property map.
    self["width"] = val
@property
def widthsrc(self):
    """
    Source reference on Chart Studio Cloud for `width`.

    The 'widthsrc' property must be specified as a string or as a
    plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    return self["widthsrc"]

@widthsrc.setter
def widthsrc(self, val):
    # Store through the underlying plotly property map.
    self["widthsrc"] = val
@property
def _prop_descriptions(self):
    # NOTE(review): this text block presumably feeds plotly's generated
    # docstring / help machinery (see BasePlotlyType) — confirm before
    # changing its exact formatting; it is a runtime string, not a docstring.
    return """\
        autocolorscale
            Determines whether the colorscale is a default palette
            (`autocolorscale: true`) or the palette determined by
            `marker.line.colorscale`. Has an effect only if in
            `marker.line.color` is set to a numerical array. In
            case `colorscale` is unspecified or `autocolorscale` is
            true, the default palette will be chosen according to
            whether numbers in the `color` array are all positive,
            all negative or mixed.
        cauto
            Determines whether or not the color domain is computed
            with respect to the input data (here in
            `marker.line.color`) or the bounds set in
            `marker.line.cmin` and `marker.line.cmax` Has an effect
            only if in `marker.line.color` is set to a numerical
            array. Defaults to `false` when `marker.line.cmin` and
            `marker.line.cmax` are set by the user.
        cmax
            Sets the upper bound of the color domain. Has an effect
            only if in `marker.line.color` is set to a numerical
            array. Value should have the same units as in
            `marker.line.color` and if set, `marker.line.cmin` must
            be set as well.
        cmid
            Sets the mid-point of the color domain by scaling
            `marker.line.cmin` and/or `marker.line.cmax` to be
            equidistant to this point. Has an effect only if in
            `marker.line.color` is set to a numerical array. Value
            should have the same units as in `marker.line.color`.
            Has no effect when `marker.line.cauto` is `false`.
        cmin
            Sets the lower bound of the color domain. Has an effect
            only if in `marker.line.color` is set to a numerical
            array. Value should have the same units as in
            `marker.line.color` and if set, `marker.line.cmax` must
            be set as well.
        color
            Sets the marker.line color. It accepts either a
            specific color or an array of numbers that are mapped
            to the colorscale relative to the max and min values of
            the array or relative to `marker.line.cmin` and
            `marker.line.cmax` if set.
        coloraxis
            Sets a reference to a shared color axis. References to
            these shared color axes are "coloraxis", "coloraxis2",
            "coloraxis3", etc. Settings for these shared color axes
            are set in the layout, under `layout.coloraxis`,
            `layout.coloraxis2`, etc. Note that multiple color
            scales can be linked to the same color axis.
        colorscale
            Sets the colorscale. Has an effect only if in
            `marker.line.color` is set to a numerical array. The
            colorscale must be an array containing arrays mapping a
            normalized value to an rgb, rgba, hex, hsl, hsv, or
            named color string. At minimum, a mapping for the
            lowest (0) and highest (1) values are required. For
            example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
            To control the bounds of the colorscale in color space,
            use `marker.line.cmin` and `marker.line.cmax`.
            Alternatively, `colorscale` may be a palette name
            string of the following list: Blackbody,Bluered,Blues,C
            ividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic,Portl
            and,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        reversescale
            Reverses the color mapping if true. Has an effect only
            if in `marker.line.color` is set to a numerical array.
            If true, `marker.line.cmin` will correspond to the last
            color in the array and `marker.line.cmax` will
            correspond to the first color.
        width
            Sets the width (in px) of the lines bounding the marker
            points.
        widthsrc
            Sets the source reference on Chart Studio Cloud for
            `width`.
        """
def __init__(
    self,
    arg=None,
    autocolorscale=None,
    cauto=None,
    cmax=None,
    cmid=None,
    cmin=None,
    color=None,
    coloraxis=None,
    colorscale=None,
    colorsrc=None,
    reversescale=None,
    width=None,
    widthsrc=None,
    **kwargs,
):
    """
    Construct a new Line object.

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or an
        instance of
        :class:`plotly.graph_objs.scatterternary.marker.Line`
    autocolorscale
        Whether to use a default palette (True) or the palette in
        `marker.line.colorscale`; only relevant when
        `marker.line.color` is a numerical array.
    cauto
        Whether the color domain is computed from the data rather
        than from `marker.line.cmin`/`marker.line.cmax`.
    cmax
        Upper bound of the color domain (requires `cmin`).
    cmid
        Mid-point of the color domain; no effect when `cauto` is
        `false`.
    cmin
        Lower bound of the color domain (requires `cmax`).
    color
        marker.line color: a specific color or an array of numbers
        mapped onto the colorscale.
    coloraxis
        Reference to a shared color axis ("coloraxis",
        "coloraxis2", ...) configured in the layout.
    colorscale
        Colorscale as [level, color] pairs or a named palette
        string.
    colorsrc
        Chart Studio Cloud source reference for `color`.
    reversescale
        Reverse the color mapping if True.
    width
        Width (in px) of the lines bounding the marker points.
    widthsrc
        Chart Studio Cloud source reference for `width`.

    Returns
    -------
    Line
    """
    super().__init__("line")
    # Internal construction path: adopt the parent and skip normal init.
    if "_parent" in kwargs:
        self._parent = kwargs["_parent"]
        return

    # Normalize `arg` into a plain dict that can be merged with kwargs.
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        arg = _copy.copy(arg)
    else:
        raise ValueError("""\
The first argument to the plotly.graph_objs.scatterternary.marker.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.marker.Line`""")

    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Apply every constructor argument through the shared property setter,
    # in the same order as the generated per-property calls.
    for prop_name, prop_value in (
        ("autocolorscale", autocolorscale),
        ("cauto", cauto),
        ("cmax", cmax),
        ("cmid", cmid),
        ("cmin", cmin),
        ("color", color),
        ("coloraxis", coloraxis),
        ("colorscale", colorscale),
        ("colorsrc", colorsrc),
        ("reversescale", reversescale),
        ("width", width),
        ("widthsrc", widthsrc),
    ):
        self._set_property(prop_name, arg, prop_value)

    # Remaining entries (from `arg` and any extra kwargs) go through the
    # generic kwargs processor.
    self._process_kwargs(**dict(arg, **kwargs))
    self._skip_invalid = False
|
Line
|
python
|
huggingface__transformers
|
src/transformers/models/imagegpt/modeling_imagegpt.py
|
{
"start": 1926,
"end": 12178
}
|
class ____(nn.Module):
    """Multi-head attention block for ImageGPT (GPT-2 style).

    Produces Q/K/V via fused ``Conv1D`` projections and applies scaled
    dot-product attention with a causal mask (self-attention only). When
    constructed with ``is_cross_attention=True``, keys/values are derived
    from encoder hidden states instead. An optional KV cache
    (``layer_past``) supports fast autoregressive decoding.
    """

    def __init__(self, config, is_cross_attention: Optional[bool] = False, layer_idx: Optional[int] = None):
        super().__init__()

        max_positions = config.max_position_embeddings
        # Static lower-triangular causal mask, shaped (1, 1, P, P) so slices
        # broadcast over (batch, heads, q_len, k_len).
        self.register_buffer(
            "bias",
            torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
                1, 1, max_positions, max_positions
            ),
            persistent=False,
        )
        # Not referenced elsewhere in this class; the masking below uses the
        # dtype's minimum value instead. Kept, presumably for compatibility.
        self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False)

        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        self.split_size = self.embed_dim
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )

        self.scale_attn_weights = config.scale_attn_weights
        self.is_cross_attention = is_cross_attention

        # Layer-wise attention scaling, reordering, and upcasting
        self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
        self.layer_idx = layer_idx
        self.reorder_and_upcast_attn = config.reorder_and_upcast_attn

        if self.is_cross_attention:
            # Cross-attention: K/V projected from encoder states (2x), Q from
            # decoder states via a separate projection.
            self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
            self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
        else:
            # Self-attention: single fused Q/K/V projection (3x).
            self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
        self.c_proj = Conv1D(self.embed_dim, self.embed_dim)

        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

    def _attn(self, query, key, value, attention_mask=None):
        """Scaled dot-product attention computed in the input dtype.

        ``query``/``key``/``value`` follow the (batch, heads, seq, head_dim)
        layout produced by the reshapes in ``forward``. Returns the attention
        output and the post-dropout attention weights.
        """
        # Raw scores: (batch, heads, q_len, k_len).
        attn_weights = torch.matmul(query, key.transpose(-1, -2))

        if self.scale_attn_weights:
            # Scale by sqrt(head_dim); `torch_float` is presumably a
            # module-level helper defined elsewhere in this file.
            attn_weights = attn_weights / torch_float(value.size(-1) ** 0.5)

        # Layer-wise attention scaling
        if self.scale_attn_by_inverse_layer_idx:
            attn_weights = attn_weights / float(self.layer_idx + 1)

        if not self.is_cross_attention:
            # if only "normal" attention layer implements causal mask
            query_length, key_length = query.size(-2), key.size(-2)
            causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
            mask_value = torch.finfo(attn_weights.dtype).min
            # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
            # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
            mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype, device=attn_weights.device)
            attn_weights = torch.where(causal_mask, attn_weights, mask_value)

        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.Softmax(dim=-1)(attn_weights)

        # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        attn_output = torch.matmul(attn_weights, value)

        return attn_output, attn_weights

    def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None):
        """Attention variant that computes scores in float32 via ``baddbmm``.

        Used when ``config.reorder_and_upcast_attn`` is set; raises if the
        upcast to float32 did not take effect.
        """
        # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
        bsz, num_heads, q_seq_len, dk = query.size()
        _, _, k_seq_len, _ = key.size()

        # Preallocate attn_weights for `baddbmm`
        attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)

        # Compute Scale Factor
        scale_factor = 1.0
        if self.scale_attn_weights:
            scale_factor /= float(value.size(-1)) ** 0.5

        if self.scale_attn_by_inverse_layer_idx:
            scale_factor /= float(self.layer_idx + 1)

        # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
        with torch.autocast(query.device.type, enabled=False):
            q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
            attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
            attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)

        if not self.is_cross_attention:
            # if only "normal" attention layer implements causal mask
            query_length, key_length = query.size(-2), key.size(-2)
            causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
            mask_value = torch.finfo(attn_weights.dtype).min
            # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
            # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
            mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype, device=attn_weights.device)
            attn_weights = torch.where(causal_mask, attn_weights, mask_value)

        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.Softmax(dim=-1)(attn_weights)

        # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op if otherwise
        if attn_weights.dtype != torch.float32:
            raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        attn_output = torch.matmul(attn_weights, value)

        return attn_output, attn_weights

    def _split_heads(self, tensor, num_heads, attn_head_size):
        """
        Splits hidden_size dim into attn_head_size and num_heads
        """
        new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
        tensor = tensor.view(*new_shape)
        return tensor.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def _merge_heads(self, tensor, num_heads, attn_head_size):
        """
        Merges attn_head_size dim and num_attn_heads dim into hidden_size
        """
        tensor = tensor.permute(0, 2, 1, 3).contiguous()
        new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
        return tensor.view(new_shape)

    def forward(
        self,
        hidden_states: torch.Tensor,
        layer_past: Optional[Cache] = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        cache_position: Optional[torch.Tensor] = None,
    ) -> tuple:
        """Run (optionally cached, optionally cross-) attention.

        Args:
            hidden_states: decoder activations of shape (batch, seq, embed_dim)
                — rank fixed by the unpack of ``hidden_states.shape`` below.
            layer_past: optional KV cache; expected to be an
                ``EncoderDecoderCache`` when cross-attending.
            attention_mask: additive mask applied to the raw attention scores.
            encoder_hidden_states: when provided, keys/values come from these
                states (cross-attention).
            encoder_attention_mask: accepted but never used in this body —
                NOTE(review): confirm whether it should replace
                ``attention_mask`` on the cross-attention path.
            use_cache, output_attentions: accepted for interface compatibility;
                not referenced in this body.
            cache_position: positions forwarded to the cache update.

        Returns:
            Tuple of ``(attn_output, attn_weights)``.
        """
        is_cross_attention = encoder_hidden_states is not None
        bsz, seq_len, _ = hidden_states.shape

        if layer_past is not None:
            if isinstance(layer_past, EncoderDecoderCache):
                is_updated = layer_past.is_updated.get(self.layer_idx)
                if is_cross_attention:
                    # after the first generated id, we can subsequently re-use all key/value_states from cache
                    curr_past_key_values = layer_past.cross_attention_cache
                else:
                    curr_past_key_values = layer_past.self_attention_cache
            else:
                # NOTE(review): with a plain Cache, `is_updated` stays unbound;
                # the cross-attention branch below assumes an EncoderDecoderCache
                # was passed — confirm callers guarantee this.
                curr_past_key_values = layer_past

        current_states = encoder_hidden_states if is_cross_attention else hidden_states
        if is_cross_attention:
            if not hasattr(self, "q_attn"):
                raise ValueError(
                    "If class is used as cross attention, the weights `q_attn` have to be defined. "
                    "Please make sure to instantiate class with `ImageGPTAttention(..., is_cross_attention=True)`."
                )
            if layer_past is not None and is_updated:
                # reuse k,v, cross_attentions, and compute only q
                query = self.q_attn(hidden_states)
                key = curr_past_key_values.layers[self.layer_idx].keys
                value = curr_past_key_values.layers[self.layer_idx].values
            else:
                query = self.q_attn(hidden_states)
                key, value = self.c_attn(current_states).split(self.split_size, dim=2)
                key = key.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
                value = value.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
        else:
            query, key, value = self.c_attn(current_states).split(self.split_size, dim=2)
            key = key.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
            value = value.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)

        if layer_past is not None:
            # save all key/value_states to cache to be re-used for fast auto-regressive generation
            # NOTE(review): in the cross-attention *reuse* path above, key/value
            # already come from the cache — confirm that update() does not
            # re-append them in that case.
            cache_position = cache_position if not is_cross_attention else None
            key, value = curr_past_key_values.update(key, value, self.layer_idx, {"cache_position": cache_position})
            # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
            if is_cross_attention:
                layer_past.is_updated[self.layer_idx] = True

        query = query.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2)

        if self.reorder_and_upcast_attn:
            attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask)
        else:
            attn_output, attn_weights = self._attn(query, key, value, attention_mask)

        attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
        attn_output = self.c_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)

        return attn_output, attn_weights
|
ImageGPTAttention
|
python
|
mlflow__mlflow
|
dev/clint/tests/rules/test_mlflow_class_name.py
|
{
"start": 260,
"end": 311
}
|
# Appears to be a lint-rule test fixture: a class whose (masked) name uses the
# accepted "MLflow" capitalization; the section below exercises the bad
# "MLFlow" casing. NOTE(review): keep the comment text below unchanged — the
# lint test presumably matches on it.
class ____:
    pass


# Bad - using MLFlow
|
MLflowClient
|
python
|
sqlalchemy__sqlalchemy
|
test/engine/test_ddlevents.py
|
{
"start": 16821,
"end": 17402
}
|
class ____(DDLEventWCreateHarness, fixtures.TestBase):
    """DDL-event harness tests exercised against a standalone ``Sequence``.

    Only runs on backends that support sequences (``__requires__``). The
    harness flags below declare that sequence create/drop DDL is emitted on
    its own rather than implicitly as part of a table's DDL.
    """

    __requires__ = ("sequences",)

    # Sequence DDL is standalone: not created/dropped with a table, and the
    # object supports its own CREATE.
    creates_implicitly_with_table = False
    drops_implicitly_with_table = False
    supports_standalone_create = True

    @testing.fixture
    def produce_subject(self):
        """Return the DDL subject under test: a normalized ``Sequence``."""
        return normalize_sequence(config, Sequence("my_seq"))

    @testing.fixture
    def produce_table_integrated_subject(self, metadata, produce_subject):
        """Return a table whose primary-key column uses the sequence."""
        return Table(
            "t",
            metadata,
            Column("id", Integer, produce_subject, primary_key=True),
        )
|
SequenceDDLEventTest
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.