language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
automl__auto-sklearn
|
autosklearn/evaluation/abstract_evaluator.py
|
{
"start": 1413,
"end": 3451
}
|
class ____(DummyClassifier):
def __init__(
self,
config: Configuration,
random_state: Optional[Union[int, np.random.RandomState]],
feat_type: Optional[FEAT_TYPE_TYPE] = None,
init_params: Optional[Dict[str, Any]] = None,
dataset_properties: Dict[str, Any] = {},
include: Optional[List[str]] = None,
exclude: Optional[List[str]] = None,
):
self.config = config
if config == 1:
super().__init__(strategy="uniform")
else:
super().__init__(strategy="most_frequent")
self.random_state = random_state
self.init_params = init_params
self.dataset_properties = dataset_properties
self.include = include
self.exclude = exclude
self.feat_type = feat_type
def pre_transform(
self,
X: np.ndarray,
y: np.ndarray,
fit_params: Optional[Dict[str, Any]] = None,
) -> Tuple[np.ndarray, Dict[str, Any]]:
if fit_params is None:
fit_params = {}
return X, fit_params
def fit(
self,
X: np.ndarray,
y: np.ndarray,
sample_weight: Optional[Union[np.ndarray, List]] = None,
) -> DummyClassifier:
return super(MyDummyClassifier, self).fit(
np.ones((X.shape[0], 1)), y, sample_weight=sample_weight
)
def fit_estimator(
self,
X: np.ndarray,
y: np.ndarray,
fit_params: Optional[Dict[str, Any]] = None,
) -> DummyClassifier:
return self.fit(X, y)
def predict_proba(self, X: np.ndarray, batch_size: int = 1000) -> np.ndarray:
new_X = np.ones((X.shape[0], 1))
probas = super().predict_proba(new_X)
probas = convert_multioutput_multiclass_to_multilabel(probas).astype(np.float32)
return probas
def estimator_supports_iterative_fit(self) -> bool:
return False
def get_additional_run_info(self) -> Optional[TYPE_ADDITIONAL_INFO]:
return None
|
MyDummyClassifier
|
python
|
spack__spack
|
lib/spack/spack/spec.py
|
{
"start": 222346,
"end": 222764
}
|
class ____(spack.error.SpecError):
def __init__(self, msg, *specs):
spec_fmt = (
"{namespace}.{name}{@version}{variants}"
"{ platform=architecture.platform}{ os=architecture.os}{ target=architecture.target}"
"{/hash:7}"
)
specs_str = "\n " + "\n ".join(spec.format(spec_fmt) for spec in specs)
super().__init__(msg + specs_str)
|
AmbiguousHashError
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pyupgrade/UP045.py
|
{
"start": 695,
"end": 1866
}
|
class ____:
service_specification: Optional[str]is not True = None
# Test for: https://github.com/astral-sh/ruff/issues/18508
# Optional[None] should not be offered a fix
foo: Optional[None] = None
from typing import NamedTuple, Optional
import typing_extensions
from typing_extensions import (
NamedTuple as NamedTupleTE,
Optional as OptionalTE,
)
# Regression test for https://github.com/astral-sh/ruff/issues/18619
# Don't emit lint for `NamedTuple`
a1: Optional[NamedTuple] = None
a2: typing.Optional[NamedTuple] = None
a3: OptionalTE[NamedTuple] = None
a4: typing_extensions.Optional[NamedTuple] = None
a5: Optional[typing.NamedTuple] = None
a6: typing.Optional[typing.NamedTuple] = None
a7: OptionalTE[typing.NamedTuple] = None
a8: typing_extensions.Optional[typing.NamedTuple] = None
a9: "Optional[NamedTuple]" = None
a10: Optional[NamedTupleTE] = None
# Test for: https://github.com/astral-sh/ruff/issues/19746
# Nested Optional types should be flattened
nested_optional: Optional[Optional[str]] = None
nested_optional_typing: typing.Optional[Optional[int]] = None
triple_nested_optional: Optional[Optional[Optional[str]]] = None
|
ServiceRefOrValue
|
python
|
scipy__scipy
|
scipy/stats/_continuous_distns.py
|
{
"start": 260245,
"end": 261541
}
|
class ____(rv_continuous):
r"""A Lomax (Pareto of the second kind) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `lomax` is:
.. math::
f(x, c) = \frac{c}{(1+x)^{c+1}}
for :math:`x \ge 0`, :math:`c > 0`.
`lomax` takes ``c`` as a shape parameter for :math:`c`.
`lomax` is a special case of `pareto` with ``loc=-1.0``.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
def _pdf(self, x, c):
# lomax.pdf(x, c) = c / (1+x)**(c+1)
return c*1.0/(1.0+x)**(c+1.0)
def _logpdf(self, x, c):
return np.log(c) - (c+1)*sc.log1p(x)
def _cdf(self, x, c):
return -sc.expm1(-c*sc.log1p(x))
def _sf(self, x, c):
return np.exp(-c*sc.log1p(x))
def _logsf(self, x, c):
return -c*sc.log1p(x)
def _ppf(self, q, c):
return sc.expm1(-sc.log1p(-q)/c)
def _isf(self, q, c):
return q**(-1.0 / c) - 1
def _stats(self, c):
mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
return mu, mu2, g1, g2
def _entropy(self, c):
return 1+1.0/c-np.log(c)
lomax = lomax_gen(a=0.0, name="lomax")
|
lomax_gen
|
python
|
mwaskom__seaborn
|
seaborn/_core/subplots.py
|
{
"start": 411,
"end": 9964
}
|
class ____:
"""
Interface for creating and using matplotlib subplots based on seaborn parameters.
Parameters
----------
subplot_spec : dict
Keyword args for :meth:`matplotlib.figure.Figure.subplots`.
facet_spec : dict
Parameters that control subplot faceting.
pair_spec : dict
Parameters that control subplot pairing.
data : PlotData
Data used to define figure setup.
"""
def __init__(
self,
subplot_spec: dict, # TODO define as TypedDict
facet_spec: FacetSpec,
pair_spec: PairSpec,
):
self.subplot_spec = subplot_spec
self._check_dimension_uniqueness(facet_spec, pair_spec)
self._determine_grid_dimensions(facet_spec, pair_spec)
self._handle_wrapping(facet_spec, pair_spec)
self._determine_axis_sharing(pair_spec)
def _check_dimension_uniqueness(
self, facet_spec: FacetSpec, pair_spec: PairSpec
) -> None:
"""Reject specs that pair and facet on (or wrap to) same figure dimension."""
err = None
facet_vars = facet_spec.get("variables", {})
if facet_spec.get("wrap") and {"col", "row"} <= set(facet_vars):
err = "Cannot wrap facets when specifying both `col` and `row`."
elif (
pair_spec.get("wrap")
and pair_spec.get("cross", True)
and len(pair_spec.get("structure", {}).get("x", [])) > 1
and len(pair_spec.get("structure", {}).get("y", [])) > 1
):
err = "Cannot wrap subplots when pairing on both `x` and `y`."
collisions = {"x": ["columns", "rows"], "y": ["rows", "columns"]}
for pair_axis, (multi_dim, wrap_dim) in collisions.items():
if pair_axis not in pair_spec.get("structure", {}):
continue
elif multi_dim[:3] in facet_vars:
err = f"Cannot facet the {multi_dim} while pairing on `{pair_axis}``."
elif wrap_dim[:3] in facet_vars and facet_spec.get("wrap"):
err = f"Cannot wrap the {wrap_dim} while pairing on `{pair_axis}``."
elif wrap_dim[:3] in facet_vars and pair_spec.get("wrap"):
err = f"Cannot wrap the {multi_dim} while faceting the {wrap_dim}."
if err is not None:
raise RuntimeError(err) # TODO what err class? Define PlotSpecError?
def _determine_grid_dimensions(
self, facet_spec: FacetSpec, pair_spec: PairSpec
) -> None:
"""Parse faceting and pairing information to define figure structure."""
self.grid_dimensions: dict[str, list] = {}
for dim, axis in zip(["col", "row"], ["x", "y"]):
facet_vars = facet_spec.get("variables", {})
if dim in facet_vars:
self.grid_dimensions[dim] = facet_spec["structure"][dim]
elif axis in pair_spec.get("structure", {}):
self.grid_dimensions[dim] = [
None for _ in pair_spec.get("structure", {})[axis]
]
else:
self.grid_dimensions[dim] = [None]
self.subplot_spec[f"n{dim}s"] = len(self.grid_dimensions[dim])
if not pair_spec.get("cross", True):
self.subplot_spec["nrows"] = 1
self.n_subplots = self.subplot_spec["ncols"] * self.subplot_spec["nrows"]
def _handle_wrapping(
self, facet_spec: FacetSpec, pair_spec: PairSpec
) -> None:
"""Update figure structure parameters based on facet/pair wrapping."""
self.wrap = wrap = facet_spec.get("wrap") or pair_spec.get("wrap")
if not wrap:
return
wrap_dim = "row" if self.subplot_spec["nrows"] > 1 else "col"
flow_dim = {"row": "col", "col": "row"}[wrap_dim]
n_subplots = self.subplot_spec[f"n{wrap_dim}s"]
flow = int(np.ceil(n_subplots / wrap))
if wrap < self.subplot_spec[f"n{wrap_dim}s"]:
self.subplot_spec[f"n{wrap_dim}s"] = wrap
self.subplot_spec[f"n{flow_dim}s"] = flow
self.n_subplots = n_subplots
self.wrap_dim = wrap_dim
def _determine_axis_sharing(self, pair_spec: PairSpec) -> None:
"""Update subplot spec with default or specified axis sharing parameters."""
axis_to_dim = {"x": "col", "y": "row"}
key: str
val: str | bool
for axis in "xy":
key = f"share{axis}"
# Always use user-specified value, if present
if key not in self.subplot_spec:
if axis in pair_spec.get("structure", {}):
# Paired axes are shared along one dimension by default
if self.wrap is None and pair_spec.get("cross", True):
val = axis_to_dim[axis]
else:
val = False
else:
# This will pick up faceted plots, as well as single subplot
# figures, where the value doesn't really matter
val = True
self.subplot_spec[key] = val
def init_figure(
self,
pair_spec: PairSpec,
pyplot: bool = False,
figure_kws: dict | None = None,
target: Axes | Figure | SubFigure | None = None,
) -> Figure:
"""Initialize matplotlib objects and add seaborn-relevant metadata."""
# TODO reduce need to pass pair_spec here?
if figure_kws is None:
figure_kws = {}
if isinstance(target, mpl.axes.Axes):
if max(self.subplot_spec["nrows"], self.subplot_spec["ncols"]) > 1:
err = " ".join([
"Cannot create multiple subplots after calling `Plot.on` with",
f"a {mpl.axes.Axes} object.",
f" You may want to use a {mpl.figure.SubFigure} instead.",
])
raise RuntimeError(err)
self._subplot_list = [{
"ax": target,
"left": True,
"right": True,
"top": True,
"bottom": True,
"col": None,
"row": None,
"x": "x",
"y": "y",
}]
self._figure = target.figure
return self._figure
elif isinstance(target, mpl.figure.SubFigure):
figure = target.figure
elif isinstance(target, mpl.figure.Figure):
figure = target
else:
if pyplot:
figure = plt.figure(**figure_kws)
else:
figure = mpl.figure.Figure(**figure_kws)
target = figure
self._figure = figure
axs = target.subplots(**self.subplot_spec, squeeze=False)
if self.wrap:
# Remove unused Axes and flatten the rest into a (2D) vector
axs_flat = axs.ravel({"col": "C", "row": "F"}[self.wrap_dim])
axs, extra = np.split(axs_flat, [self.n_subplots])
for ax in extra:
ax.remove()
if self.wrap_dim == "col":
axs = axs[np.newaxis, :]
else:
axs = axs[:, np.newaxis]
# Get i, j coordinates for each Axes object
# Note that i, j are with respect to faceting/pairing,
# not the subplot grid itself, (which only matters in the case of wrapping).
iter_axs: np.ndenumerate | zip
if not pair_spec.get("cross", True):
indices = np.arange(self.n_subplots)
iter_axs = zip(zip(indices, indices), axs.flat)
else:
iter_axs = np.ndenumerate(axs)
self._subplot_list = []
for (i, j), ax in iter_axs:
info = {"ax": ax}
nrows, ncols = self.subplot_spec["nrows"], self.subplot_spec["ncols"]
if not self.wrap:
info["left"] = j % ncols == 0
info["right"] = (j + 1) % ncols == 0
info["top"] = i == 0
info["bottom"] = i == nrows - 1
elif self.wrap_dim == "col":
info["left"] = j % ncols == 0
info["right"] = ((j + 1) % ncols == 0) or ((j + 1) == self.n_subplots)
info["top"] = j < ncols
info["bottom"] = j >= (self.n_subplots - ncols)
elif self.wrap_dim == "row":
info["left"] = i < nrows
info["right"] = i >= self.n_subplots - nrows
info["top"] = i % nrows == 0
info["bottom"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)
if not pair_spec.get("cross", True):
info["top"] = j < ncols
info["bottom"] = j >= self.n_subplots - ncols
for dim in ["row", "col"]:
idx = {"row": i, "col": j}[dim]
info[dim] = self.grid_dimensions[dim][idx]
for axis in "xy":
idx = {"x": j, "y": i}[axis]
if axis in pair_spec.get("structure", {}):
key = f"{axis}{idx}"
else:
key = axis
info[axis] = key
self._subplot_list.append(info)
return figure
def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?
"""Yield each subplot dictionary with Axes object and metadata."""
yield from self._subplot_list
def __len__(self) -> int:
"""Return the number of subplots in this figure."""
return len(self._subplot_list)
|
Subplots
|
python
|
getsentry__sentry
|
tests/sentry/tasks/test_commits.py
|
{
"start": 986,
"end": 13122
}
|
class ____(TestCase):
def _test_simple_action(self, user, org):
repo = Repository.objects.create(name="example", provider="dummy", organization_id=org.id)
release = Release.objects.create(organization_id=org.id, version="abcabcabc")
commit = Commit.objects.create(organization_id=org.id, repository_id=repo.id, key="a" * 40)
ReleaseHeadCommit.objects.create(
organization_id=org.id, repository_id=repo.id, release=release, commit=commit
)
refs = [{"repository": repo.name, "commit": "b" * 40}]
release2 = Release.objects.create(organization_id=org.id, version="12345678")
deploy = Deploy.objects.create(organization_id=org.id, release=release2, environment_id=5)
with self.tasks():
with patch.object(Deploy, "notify_if_ready") as mock_notify_if_ready:
fetch_commits(
release_id=release2.id,
user_id=user.id,
refs=refs,
previous_release_id=release.id,
)
commit_list = list(
Commit.objects.filter(releasecommit__release=release2).order_by("releasecommit__order")
)
# see DummyRepositoryProvider.compare_commits
assert len(commit_list) == 3
assert commit_list[0].repository_id == repo.id
assert commit_list[0].organization_id == org.id
assert commit_list[0].key == "62de626b7c7cfb8e77efb4273b1a3df4123e6216"
assert commit_list[1].repository_id == repo.id
assert commit_list[1].organization_id == org.id
assert commit_list[1].key == "58de626b7c7cfb8e77efb4273b1a3df4123e6345"
assert commit_list[2].repository_id == repo.id
assert commit_list[2].organization_id == org.id
assert commit_list[2].key == "b" * 40
mock_notify_if_ready.assert_called_with(deploy.id, fetch_complete=True)
latest_repo_release_environment = LatestRepoReleaseEnvironment.objects.get(
repository_id=repo.id, environment_id=5
)
assert latest_repo_release_environment.deploy_id == deploy.id
assert latest_repo_release_environment.release_id == release2.id
assert latest_repo_release_environment.commit_id == commit_list[0].id
def test_simple(self, mock_record: MagicMock) -> None:
self.login_as(user=self.user)
org = self.create_organization(owner=self.user, name="baz")
self._test_simple_action(user=self.user, org=org)
def test_duplicate_repositories(self, mock_record: MagicMock) -> None:
self.login_as(user=self.user)
org = self.create_organization(owner=self.user, name="baz")
Repository.objects.create(
name="example", provider="dummy", organization_id=org.id, status=ObjectStatus.DISABLED
)
Repository.objects.create(name="example", provider="dummy", organization_id=org.id)
self._test_simple_action(user=self.user, org=org)
def test_release_locked(self, mock_record_event: MagicMock) -> None:
self.login_as(user=self.user)
org = self.create_organization(owner=self.user, name="baz")
repo = Repository.objects.create(name="example", provider="dummy", organization_id=org.id)
old_release = Release.objects.create(organization_id=org.id, version="abcabcabc")
commit = Commit.objects.create(organization_id=org.id, repository_id=repo.id, key="a" * 40)
ReleaseHeadCommit.objects.create(
organization_id=org.id, repository_id=repo.id, release=old_release, commit=commit
)
refs = [{"repository": repo.name, "commit": "b" * 40}]
new_release = Release.objects.create(organization_id=org.id, version="12345678")
lock = locks.get(Release.get_lock_key(org.id, new_release.id), duration=10, name="release")
lock.acquire()
with self.tasks():
fetch_commits(
release_id=new_release.id,
user_id=self.user.id,
refs=refs,
previous_release_id=old_release.id,
)
count_query = ReleaseHeadCommit.objects.filter(release=new_release)
# No release commits should be made as the task should return early.
assert count_query.count() == 0
@patch("sentry.tasks.commits.handle_invalid_identity")
@patch("sentry.plugins.providers.dummy.repository.DummyRepositoryProvider.compare_commits")
def test_fetch_error_invalid_identity(
self, mock_compare_commits, mock_handle_invalid_identity, mock_record
):
self.login_as(user=self.user)
org = self.create_organization(owner=self.user, name="baz")
repo = Repository.objects.create(name="example", provider="dummy", organization_id=org.id)
release = Release.objects.create(organization_id=org.id, version="abcabcabc")
commit = Commit.objects.create(organization_id=org.id, repository_id=repo.id, key="a" * 40)
ReleaseHeadCommit.objects.create(
organization_id=org.id, repository_id=repo.id, release=release, commit=commit
)
refs = [{"repository": repo.name, "commit": "b" * 40}]
release2 = Release.objects.create(organization_id=org.id, version="12345678")
with assume_test_silo_mode(SiloMode.CONTROL):
usa = UserSocialAuth.objects.create(user=self.user, provider="dummy")
mock_compare_commits.side_effect = InvalidIdentity(identity=usa)
fetch_commits(
release_id=release2.id, user_id=self.user.id, refs=refs, previous_release_id=release.id
)
mock_handle_invalid_identity.assert_called_once_with(identity=usa, commit_failure=True)
assert_slo_metric(mock_record, EventLifecycleOutcome.HALTED)
@patch("sentry.plugins.providers.dummy.repository.DummyRepositoryProvider.compare_commits")
def test_fetch_error_plugin_error(
self, mock_compare_commits: MagicMock, mock_record: MagicMock
) -> None:
self.login_as(user=self.user)
org = self.create_organization(owner=self.user, name="baz")
repo = Repository.objects.create(name="example", provider="dummy", organization_id=org.id)
release = Release.objects.create(organization_id=org.id, version="abcabcabc")
commit = Commit.objects.create(organization_id=org.id, repository_id=repo.id, key="a" * 40)
ReleaseHeadCommit.objects.create(
organization_id=org.id, repository_id=repo.id, release=release, commit=commit
)
refs = [{"repository": repo.name, "commit": "b" * 40}]
release2 = Release.objects.create(organization_id=org.id, version="12345678")
with assume_test_silo_mode(SiloMode.CONTROL):
UserSocialAuth.objects.create(user=self.user, provider="dummy")
mock_compare_commits.side_effect = Exception("secrets")
with self.tasks():
fetch_commits(
release_id=release2.id,
user_id=self.user.id,
refs=refs,
previous_release_id=release.id,
)
msg = mail.outbox[-1]
assert msg.subject == "Unable to Fetch Commits"
assert msg.to == [self.user.email]
assert "secrets" not in msg.body
assert_slo_metric(mock_record, EventLifecycleOutcome.FAILURE)
@patch("sentry.plugins.providers.dummy.repository.DummyRepositoryProvider.compare_commits")
def test_fetch_error_plugin_error_for_sentry_app(
self, mock_compare_commits: MagicMock, mock_record: MagicMock
) -> None:
org = self.create_organization(owner=self.user, name="baz")
sentry_app = self.create_sentry_app(
organization=org, published=True, verify_install=False, name="Super Awesome App"
)
repo = Repository.objects.create(name="example", provider="dummy", organization_id=org.id)
release = Release.objects.create(organization_id=org.id, version="abcabcabc")
commit = Commit.objects.create(organization_id=org.id, repository_id=repo.id, key="a" * 40)
ReleaseHeadCommit.objects.create(
organization_id=org.id, repository_id=repo.id, release=release, commit=commit
)
refs = [{"repository": repo.name, "commit": "b" * 40}]
release2 = Release.objects.create(organization_id=org.id, version="12345678")
mock_compare_commits.side_effect = Exception("secrets")
mock_record.reset_mock()
with self.tasks():
fetch_commits(
release_id=release2.id,
user_id=sentry_app.proxy_user_id,
refs=refs,
previous_release_id=release.id,
)
msg = mail.outbox[-1]
assert msg.subject == "Unable to Fetch Commits"
assert msg.to == [self.user.email]
assert "secrets" not in msg.body
assert_slo_metric(mock_record, EventLifecycleOutcome.FAILURE)
@patch("sentry.plugins.providers.dummy.repository.DummyRepositoryProvider.compare_commits")
def test_fetch_error_random_exception(
self, mock_compare_commits: MagicMock, mock_record: MagicMock
) -> None:
self.login_as(user=self.user)
org = self.create_organization(owner=self.user, name="baz")
repo = Repository.objects.create(name="example", provider="dummy", organization_id=org.id)
release = Release.objects.create(organization_id=org.id, version="abcabcabc")
commit = Commit.objects.create(organization_id=org.id, repository_id=repo.id, key="a" * 40)
ReleaseHeadCommit.objects.create(
organization_id=org.id, repository_id=repo.id, release=release, commit=commit
)
refs = [{"repository": repo.name, "commit": "b" * 40}]
release2 = Release.objects.create(organization_id=org.id, version="12345678")
with assume_test_silo_mode(SiloMode.CONTROL):
UserSocialAuth.objects.create(user=self.user, provider="dummy")
mock_compare_commits.side_effect = PluginError("You can read me")
with self.tasks():
fetch_commits(
release_id=release2.id,
user_id=self.user.id,
refs=refs,
previous_release_id=release.id,
)
msg = mail.outbox[-1]
assert msg.subject == "Unable to Fetch Commits"
assert msg.to == [self.user.email]
assert "You can read me" in msg.body
assert_slo_metric(mock_record, EventLifecycleOutcome.HALTED)
def test_fetch_error_random_exception_integration(self, mock_record: MagicMock) -> None:
self.login_as(user=self.user)
org = self.create_organization(owner=self.user, name="baz")
with assume_test_silo_mode(SiloMode.CONTROL):
integration = self.create_provider_integration(provider="example", name="Example")
integration.add_organization(org)
repo = Repository.objects.create(
name="example",
provider="integrations:example",
organization_id=org.id,
integration_id=integration.id,
)
release = Release.objects.create(organization_id=org.id, version="abcabcabc")
commit = Commit.objects.create(organization_id=org.id, repository_id=repo.id, key="a" * 40)
ReleaseHeadCommit.objects.create(
organization_id=org.id, repository_id=repo.id, release=release, commit=commit
)
refs = [{"repository": repo.name, "commit": "b" * 40}]
release2 = Release.objects.create(organization_id=org.id, version="12345678")
with self.tasks():
fetch_commits(
release_id=release2.id,
user_id=self.user.id,
refs=refs,
previous_release_id=release.id,
)
msg = mail.outbox[-1]
assert msg.subject == "Unable to Fetch Commits"
assert msg.to == [self.user.email]
assert "Repository not found" in msg.body
assert_slo_metric(mock_record, EventLifecycleOutcome.HALTED)
@control_silo_test
|
FetchCommitsTest
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/frame_methods.py
|
{
"start": 12940,
"end": 13510
}
|
class ____:
params = (["all", "any"], [0, 1])
param_names = ["how", "axis"]
def setup(self, how, axis):
self.df = DataFrame(np.random.randn(10000, 1000))
self.df.iloc[50:1000, 20:50] = np.nan
self.df.iloc[2000:3000] = np.nan
self.df.iloc[:, 60:70] = np.nan
self.df_mixed = self.df.copy()
self.df_mixed["foo"] = "bar"
def time_dropna(self, how, axis):
self.df.dropna(how=how, axis=axis)
def time_dropna_axis_mixed_dtypes(self, how, axis):
self.df_mixed.dropna(how=how, axis=axis)
|
Dropna
|
python
|
allegroai__clearml
|
clearml/storage/helper.py
|
{
"start": 5739,
"end": 14822
}
|
class ____(_Driver):
"""LibCloud http/https adapter (simple, enough for now)"""
timeout_connection = deferred_config("http.timeout.connection", 30)
timeout_total = deferred_config("http.timeout.total", 30)
max_retries = deferred_config("http.download.max_retries", 15)
min_kbps_speed = 50
schemes = ("http", "https")
class _Container(object):
_default_backend_session = None
def __init__(self, name: str, retries: int = 5, **kwargs: Any) -> None:
self.name = name
self.session = get_http_session_with_retry(
total=retries,
connect=retries,
read=retries,
redirect=retries,
backoff_factor=0.5,
backoff_max=120,
status_forcelist=[
requests_codes.request_timeout,
requests_codes.timeout,
requests_codes.bad_gateway,
requests_codes.service_unavailable,
requests_codes.bandwidth_limit_exceeded,
requests_codes.too_many_requests,
],
config=config,
)
self._file_server_hosts = set(_HttpDriver.get_file_server_hosts())
def _should_attach_auth_header(self) -> bool:
return any(
(self.name.rstrip("/") == host.rstrip("/") or self.name.startswith(host.rstrip("/") + "/"))
for host in self._file_server_hosts
)
def get_headers(self, _: Any) -> Dict[str, str]:
if not self._default_backend_session:
from ..backend_interface.base import InterfaceBase
self._default_backend_session = InterfaceBase._get_default_session()
if self._should_attach_auth_header():
return self._default_backend_session.add_auth_headers({})
class _HttpSessionHandle(object):
def __init__(
self,
url: str,
is_stream: bool,
container_name: str,
object_name: str,
) -> None:
self.url, self.is_stream, self.container_name, self.object_name = (
url,
is_stream,
container_name,
object_name,
)
def __init__(self, retries: Optional[int] = None) -> None:
self._retries = retries or int(self.max_retries)
self._containers = {}
def get_container(
self,
container_name: str,
config: Optional[Any] = None,
**kwargs: Any,
) -> _Container:
if container_name not in self._containers:
self._containers[container_name] = self._Container(name=container_name, retries=self._retries, **kwargs)
return self._containers[container_name]
def upload_object_via_stream(
self,
iterator: Any,
container: Any,
object_name: str,
extra: dict = None,
callback: Any = None,
**kwargs: Any,
) -> requests.Response:
def monitor_callback(monitor: Any) -> None:
new_chunk = monitor.bytes_read - monitor.previous_read
monitor.previous_read = monitor.bytes_read
try:
callback(new_chunk)
except Exception as ex:
self.get_logger().debug("Exception raised when running callback function: {}".format(ex))
# when sending data in post, there is no connection timeout, just an entire upload timeout
timeout = int(self.timeout_total)
url = container.name
path = object_name
if not urlparse(url).netloc:
host, _, path = object_name.partition("/")
url += host + "/"
stream_size = None
if hasattr(iterator, "tell") and hasattr(iterator, "seek"):
pos = iterator.tell()
iterator.seek(0, 2)
stream_size = iterator.tell() - pos
iterator.seek(pos, 0)
timeout = max(timeout, (stream_size / 1024) / float(self.min_kbps_speed))
m = MultipartEncoder(fields={path: (path, iterator, get_file_mimetype(object_name))})
if callback and stream_size:
m = MultipartEncoderMonitor(m, callback=monitor_callback)
m.previous_read = 0
headers = {
"Content-Type": m.content_type,
}
headers.update(container.get_headers(url) or {})
res = container.session.post(url, data=m, timeout=timeout, headers=headers)
if res.status_code != requests.codes.ok:
raise ValueError("Failed uploading object {} to {} ({}): {}".format(
object_name, url, res.status_code, res.text))
# call back is useless because we are not calling it while uploading...
return res
def list_container_objects(self, *args: Any, **kwargs: Any) -> None:
raise NotImplementedError("List is not implemented for http protocol")
def delete_object(self, obj: _HttpSessionHandle, *args: Any, **kwargs: Any) -> bool:
assert isinstance(obj, self._HttpSessionHandle)
container = self._containers[obj.container_name]
res = container.session.delete(obj.url, headers=container.get_headers(obj.url))
if res.status_code != requests.codes.ok:
if not kwargs.get("silent", False):
self.get_logger().warning(
"Failed deleting object %s (%d): %s" % (obj.object_name, res.status_code, res.text)
)
return False
return True
def get_object(
self,
container_name: str,
object_name: str,
*args: Any,
**kwargs: Any,
) -> _HttpSessionHandle:
is_stream = kwargs.get("stream", True)
url = "/".join(
(
container_name[:-1] if container_name.endswith("/") else container_name,
object_name.lstrip("/"),
)
)
return self._HttpSessionHandle(url, is_stream, container_name, object_name)
def _get_download_object(self, obj: Any) -> Any:
# bypass for session result
if not isinstance(obj, self._HttpSessionHandle):
return obj
container = self._containers[obj.container_name]
# set stream flag before we send the request
container.session.stream = obj.is_stream
res = container.session.get(
obj.url,
timeout=(int(self.timeout_connection), int(self.timeout_total)),
headers=container.get_headers(obj.url),
)
if res.status_code != requests.codes.ok:
raise ValueError("Failed getting object %s (%d): %s" % (obj.object_name, res.status_code, res.reason))
return res
def download_object_as_stream(self, obj: Any, chunk_size: int = 64 * 1024, **_: Any) -> Iterable[bytes]:
# return iterable object
obj = self._get_download_object(obj)
return obj.iter_content(chunk_size=chunk_size)
def download_object(
self,
obj: Any,
local_path: str,
overwrite_existing: bool = True,
delete_on_failure: bool = True,
callback: Callable[[int], None] = None,
**_: Any,
) -> int:
obj = self._get_download_object(obj)
p = Path(local_path)
if not overwrite_existing and p.is_file():
self.get_logger().warning("failed saving after download: overwrite=False and file exists (%s)" % str(p))
return
length = 0
with p.open(mode="wb") as f:
for chunk in obj.iter_content(chunk_size=5 * 1024 * 1024):
# filter out keep-alive new chunks
if not chunk:
continue
chunk_size = len(chunk)
f.write(chunk)
length += chunk_size
if callback:
callback(chunk_size)
return length
def get_direct_access(self, remote_path: str, **_: Any) -> None:
return None
def test_upload(self, test_path: str, config: Any, **kwargs: Any) -> bool:
return True
def upload_object(
self,
file_path: str,
container: Any,
object_name: str,
extra: dict,
callback: Optional[Callable] = None,
**kwargs: Any,
) -> Any:
with open(file_path, "rb") as stream:
return self.upload_object_via_stream(
iterator=stream,
container=container,
object_name=object_name,
extra=extra,
callback=callback,
**kwargs,
)
def exists_file(self, container_name: str, object_name: str) -> bool:
# noinspection PyBroadException
try:
container = self.get_container(container_name)
url = container_name + object_name
return container.session.head(url, allow_redirects=True, headers=container.get_headers(url)).ok
except Exception:
return False
|
_HttpDriver
|
python
|
huggingface__transformers
|
src/transformers/models/aya_vision/modeling_aya_vision.py
|
{
"start": 7219,
"end": 14914
}
|
class ____(AyaVisionPreTrainedModel):
_checkpoint_conversion_mapping = {
r"^language_model.model": "language_model",
}
def __init__(self, config: AyaVisionConfig):
super().__init__(config)
self.vision_tower = AutoModel.from_config(config.vision_config)
self.multi_modal_projector = AyaVisionMultiModalProjector(config)
self.language_model = AutoModel.from_config(config.text_config)
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def get_image_features(
self,
pixel_values: torch.FloatTensor,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
**kwargs,
):
"""
Obtains image last hidden states from the vision tower and apply multimodal projection.
Args:
pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`):
The tensors corresponding to the input images.
vision_feature_layer (`Union[int, list[int]]`, *optional*):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
vision_feature_select_strategy (`str`, *optional*):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`
Returns:
image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).
"""
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
if vision_feature_select_strategy not in ["default", "full"]:
raise ValueError(f"Unexpected select feature strategy: {self.config.vision_feature_select_strategy}")
kwargs = {k: v for k, v in kwargs.items() if v is not None}
# this is not memory efficient at all (output_hidden_states=True) will save all the hidden states.
image_outputs = self.vision_tower(pixel_values, output_hidden_states=True, **kwargs)
# If we have one vision feature layer, return the corresponding hidden states,
# otherwise, select the hidden states of each feature layer and concatenate them
if isinstance(vision_feature_layer, int):
selected_image_feature = image_outputs.hidden_states[vision_feature_layer]
if vision_feature_select_strategy == "default":
selected_image_feature = selected_image_feature[:, 1:]
else:
hs_pool = [image_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer]
# For default; crop CLS from each hidden state in the hidden state pool
if vision_feature_select_strategy == "default":
hs_pool = [hs[:, 1:] for hs in hs_pool]
selected_image_feature = torch.cat(hs_pool, dim=-1)
image_features = self.multi_modal_projector(selected_image_feature)
return image_features
    def get_placeholder_mask(
        self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor
    ):
        """
        Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
        equal to the length of multimodal features. If the lengths are different, an error is raised.

        Returns a boolean mask shaped like `inputs_embeds` that is True at every
        position occupied by an image placeholder token.
        """
        if input_ids is None:
            # No token ids available: recover placeholder positions by comparing
            # each input embedding against the embedding of the image token id.
            special_image_mask = inputs_embeds == self.get_input_embeddings()(
                torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
            )
            # A position is a placeholder only if *every* embedding dim matches.
            special_image_mask = special_image_mask.all(-1)
        else:
            special_image_mask = input_ids == self.config.image_token_id

        n_image_tokens = special_image_mask.sum()
        # Broadcast the (batch, seq) mask over the embedding dimension.
        special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
        n_image_features = image_features.shape[0] * image_features.shape[1]
        # NOTE(review): the guard compares total element counts (numel) while the
        # error message reports token/feature counts — these describe the same
        # mismatch only when embed dims agree; confirm this is intended.
        if inputs_embeds[special_image_mask].numel() != image_features.numel():
            raise ValueError(
                f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
            )
        return special_image_mask
    @check_model_inputs()
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        vision_feature_layer: Optional[Union[int, list[int]]] = None,
        vision_feature_select_strategy: Optional[str] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, AyaVisionModelOutputWithPast]:
        # Resolve per-call overrides, falling back to the model config.
        vision_feature_layer = (
            vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
        )
        vision_feature_select_strategy = (
            vision_feature_select_strategy
            if vision_feature_select_strategy is not None
            else self.config.vision_feature_select_strategy
        )

        # Exactly one of input_ids / inputs_embeds must be provided.
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)

        if pixel_values is not None:
            # Encode images and splice the projected features into the text
            # embeddings at the image-placeholder positions.
            image_features = self.get_image_features(
                pixel_values=pixel_values,
                vision_feature_layer=vision_feature_layer,
                vision_feature_select_strategy=vision_feature_select_strategy,
            )
            image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
            special_image_mask = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, image_features=image_features
            )
            inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)

        outputs = self.language_model(
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        return AyaVisionModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            # `image_features` is only bound when pixel_values was given; the
            # conditional short-circuits so the name is never read otherwise.
            image_hidden_states=image_features if pixel_values is not None else None,
        )
@auto_docstring(
custom_intro="""
The AYA_VISION model which consists of a vision backbone and a language model.
"""
)
|
AyaVisionModel
|
python
|
joblib__joblib
|
joblib/_dask.py
|
{
"start": 3868,
"end": 13217
}
|
class ____(AutoBatchingMixin, ParallelBackendBase):
    """joblib parallel backend that executes batches on a dask.distributed cluster.

    Tasks submitted via :meth:`apply_async` are wrapped in a ``Batch`` and sent
    to the dask scheduler; results are harvested asynchronously on the client's
    event loop by :meth:`_collect`.
    """

    # Target batch durations used by AutoBatchingMixin's adaptive batching.
    MIN_IDEAL_BATCH_DURATION = 0.2
    MAX_IDEAL_BATCH_DURATION = 1.0

    supports_retrieve_callback = True
    default_n_jobs = -1

    def __init__(
        self,
        scheduler_host=None,
        scatter=None,
        client=None,
        loop=None,
        wait_for_workers_timeout=10,
        **submit_kwargs,
    ):
        super().__init__()

        if distributed is None:
            msg = (
                "You are trying to use 'dask' as a joblib parallel backend "
                "but dask is not installed. Please install dask "
                "to fix this error."
            )
            raise ValueError(msg)

        if client is None:
            if scheduler_host:
                client = Client(scheduler_host, loop=loop, set_as_default=False)
            else:
                try:
                    # Reuse the client of the active dask context, if any.
                    client = get_client()
                except ValueError as e:
                    msg = (
                        "To use Joblib with Dask first create a Dask Client"
                        "\n\n"
                        "    from dask.distributed import Client\n"
                        "    client = Client()\n"
                        "or\n"
                        "    client = Client('scheduler-address:8786')"
                    )
                    raise ValueError(msg) from e

        self.client = client

        if scatter is not None and not isinstance(scatter, (list, tuple)):
            raise TypeError(
                "scatter must be a list/tuple, got `%s`" % type(scatter).__name__
            )

        if scatter is not None and len(scatter) > 0:
            # Keep a reference to the scattered data to keep the ids the same
            self._scatter = list(scatter)
            scattered = self.client.scatter(scatter, broadcast=True)
            # Map object id -> dask future so arguments can be substituted
            # by reference in _to_func_args.
            self.data_futures = {id(x): f for x, f in zip(scatter, scattered)}
        else:
            self._scatter = []
            self.data_futures = {}
        self.wait_for_workers_timeout = wait_for_workers_timeout
        self.submit_kwargs = submit_kwargs
        # Stream of submitted dask futures, consumed by _collect.
        self.waiting_futures = as_completed(
            [], loop=client.loop, with_results=True, raise_errors=False
        )
        # dask future -> concurrent.futures.Future handed back to joblib.
        self._results = {}
        # dask future -> joblib completion callback.
        self._callbacks = {}

    async def _collect(self):
        """Event-loop coroutine that forwards finished dask futures to joblib.

        Runs until stop_call() flips self._continue. The joblib callback is
        only invoked on success; errors are propagated via set_exception.
        """
        while self._continue:
            async for future, result in self.waiting_futures:
                cf_future = self._results.pop(future)
                callback = self._callbacks.pop(future)
                if future.status == "error":
                    typ, exc, tb = result
                    cf_future.set_exception(exc)
                else:
                    cf_future.set_result(result)
                    callback(result)
            await asyncio.sleep(0.01)

    def __reduce__(self):
        # Pickle to a fresh default-configured backend (clients don't pickle).
        return (DaskDistributedBackend, ())

    def get_nested_backend(self):
        # Nested parallelism reuses the same client; -1 means "all workers".
        return DaskDistributedBackend(client=self.client), -1

    def configure(self, n_jobs=1, parallel=None, **backend_args):
        self.parallel = parallel
        return self.effective_n_jobs(n_jobs)

    def start_call(self):
        """Begin a Parallel.__call__: start collecting and reset the scatter cache."""
        self._continue = True
        self.client.loop.add_callback(self._collect)
        self.call_data_futures = _WeakKeyDictionary()

    def stop_call(self):
        # The explicit call to clear is required to break a cycling reference
        # to the futures.
        self._continue = False
        # wait for the future collection routine (self._backend._collect) to
        # finish in order to limit asyncio warnings due to aborting _collect
        # during a following backend termination call
        time.sleep(0.01)
        self.call_data_futures.clear()

    def effective_n_jobs(self, n_jobs):
        """Return the number of worker cores, optionally waiting for workers."""
        effective_n_jobs = sum(self.client.ncores().values())
        if effective_n_jobs != 0 or not self.wait_for_workers_timeout:
            return effective_n_jobs

        # If there is no worker, schedule a probe task to wait for the workers
        # to come up and be available. If the dask cluster is in adaptive mode
        # task might cause the cluster to provision some workers.
        try:
            self.client.submit(_joblib_probe_task).result(
                timeout=self.wait_for_workers_timeout
            )
        except _TimeoutError as e:
            error_msg = (
                "DaskDistributedBackend has no worker after {} seconds. "
                "Make sure that workers are started and can properly connect "
                "to the scheduler and increase the joblib/dask connection "
                "timeout with:\n\n"
                "parallel_config(backend='dask', wait_for_workers_timeout={})"
            ).format(
                self.wait_for_workers_timeout,
                max(10, 2 * self.wait_for_workers_timeout),
            )
            raise TimeoutError(error_msg) from e
        return sum(self.client.ncores().values())

    async def _to_func_args(self, func):
        """Replace large/scattered arguments of a batch by dask futures.

        Returns ``(Batch(tasks), tasks)`` where each task is
        ``(callable, args, kwargs)`` with data arguments possibly substituted
        by futures pointing at already-scattered copies.
        """
        itemgetters = dict()

        # Futures that are dynamically generated during a single call to
        # Parallel.__call__.
        call_data_futures = getattr(self, "call_data_futures", None)

        async def maybe_to_futures(args):
            out = []
            for arg in args:
                arg_id = id(arg)
                if arg_id in itemgetters:
                    out.append(itemgetters[arg_id])
                    continue

                f = self.data_futures.get(arg_id, None)
                if f is None and call_data_futures is not None:
                    try:
                        f = await call_data_futures[arg]
                    except KeyError:
                        pass
                    if f is None:
                        if is_weakrefable(arg) and sizeof(arg) > 1e3:
                            # Automatically scatter large objects to some of
                            # the workers to avoid duplicated data transfers.
                            # Rely on automated inter-worker data stealing if
                            # more workers need to reuse this data
                            # concurrently.
                            # set hash=False - nested scatter calls (i.e
                            # calling client.scatter inside a dask worker)
                            # using hash=True often raise CancelledError,
                            # see dask/distributed#3703
                            _coro = self.client.scatter(
                                arg, asynchronous=True, hash=False
                            )
                            # Centralize the scattering of identical arguments
                            # between concurrent apply_async callbacks by
                            # exposing the running coroutine in
                            # call_data_futures before it completes.
                            t = asyncio.Task(_coro)
                            call_data_futures[arg] = t

                            f = await t

                if f is not None:
                    out.append(f)
                else:
                    out.append(arg)
            return out

        tasks = []
        for f, args, kwargs in func.items:
            args = list(await maybe_to_futures(args))
            kwargs = dict(zip(kwargs.keys(), await maybe_to_futures(kwargs.values())))
            tasks.append((f, args, kwargs))

        return (Batch(tasks), tasks)

    def apply_async(self, func, callback=None):
        """Submit a joblib batch to the cluster; returns an AsyncResult-like future."""
        cf_future = concurrent.futures.Future()
        cf_future.get = cf_future.result  # achieve AsyncResult API

        async def f(func, callback):
            # Argument scattering must happen on the event loop, hence the
            # submission itself is deferred to this coroutine.
            batch, tasks = await self._to_func_args(func)
            key = f"{repr(batch)}-{uuid4().hex}"

            dask_future = self.client.submit(
                _TracebackCapturingWrapper(batch),
                tasks=tasks,
                key=key,
                **self.submit_kwargs,
            )
            self.waiting_futures.add(dask_future)
            self._callbacks[dask_future] = callback
            self._results[dask_future] = cf_future

        self.client.loop.add_callback(f, func, callback)

        return cf_future

    def retrieve_result_callback(self, out):
        # Re-raise remote exceptions with their captured traceback.
        return _retrieve_traceback_capturing_wrapped_call(out)

    def abort_everything(self, ensure_ready=True):
        """Tell the client to cancel any task submitted via this instance

        joblib.Parallel will never access those results
        """
        with self.waiting_futures.lock:
            self.waiting_futures.futures.clear()
            while not self.waiting_futures.queue.empty():
                self.waiting_futures.queue.get()

    @contextlib.contextmanager
    def retrieval_context(self):
        """Override ParallelBackendBase.retrieval_context to avoid deadlocks.

        This removes thread from the worker's thread pool (using 'secede').
        Seceding avoids deadlock in nested parallelism settings.
        """
        # See 'joblib.Parallel.__call__' and 'joblib.Parallel.retrieve' for how
        # this is used.
        if hasattr(thread_state, "execution_state"):
            # we are in a worker. Secede to avoid deadlock.
            secede()

        yield

        if hasattr(thread_state, "execution_state"):
            rejoin()
|
DaskDistributedBackend
|
python
|
gevent__gevent
|
src/gevent/tests/test__subprocess.py
|
{
"start": 858,
"end": 12371
}
|
class ____(greentest.TestCase):
# Use the normal error handling. Make sure that any background greenlets
# subprocess spawns propagate errors as expected.
error_fatal = False
def test_exit(self):
popen = subprocess.Popen([sys.executable, '-c', 'import sys; sys.exit(10)'])
self.assertEqual(popen.wait(), 10)
def test_wait(self):
popen = subprocess.Popen([sys.executable, '-c', 'import sys; sys.exit(11)'])
gevent.wait([popen])
self.assertEqual(popen.poll(), 11)
def test_child_exception(self):
with self.assertRaises(OSError) as exc:
subprocess.Popen(['*']).wait()
self.assertEqual(exc.exception.errno, 2)
def test_leak(self):
num_before = greentest.get_number_open_files()
p = subprocess.Popen([sys.executable, "-c", "print()"],
stdout=subprocess.PIPE)
p.wait()
p.stdout.close()
del p
num_after = greentest.get_number_open_files()
self.assertEqual(num_before, num_after)
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-W", "ignore",
"-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
if sys.executable.endswith('-dbg'):
assert stderr.startswith(b'pineapple')
else:
self.assertEqual(stderr, b"pineapple")
@greentest.skipIf(subprocess.mswindows,
"Windows does weird things here")
@greentest.skipOnLibuvOnCIOnPyPy("Sometimes segfaults")
def test_communicate_universal(self):
# Native string all the things. See https://github.com/gevent/gevent/issues/1039
p = subprocess.Popen(
[
sys.executable,
"-W", "ignore",
"-c",
'import sys,os;'
'sys.stderr.write("pineapple\\r\\n\\xff\\xff\\xf2\\xf9\\r\\n");'
'sys.stdout.write(sys.stdin.read())'
],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True
)
(stdout, stderr) = p.communicate('banana\r\n\xff\xff\xf2\xf9\r\n')
self.assertIsInstance(stdout, str)
self.assertIsInstance(stderr, str)
self.assertEqual(stdout,
'banana\n\xff\xff\xf2\xf9\n')
self.assertEqual(stderr,
'pineapple\n\xff\xff\xf2\xf9\n')
@greentest.skipOnWindows("Windows IO is weird; this doesn't raise")
def test_communicate_undecodable(self):
# If the subprocess writes non-decodable data, `communicate` raises the
# same UnicodeDecodeError that the stdlib does, instead of
# printing it to the hub. This only applies to Python 3, because only it
# will actually use text mode.
# See https://github.com/gevent/gevent/issues/1510
with subprocess.Popen(
[
sys.executable,
'-W', 'ignore',
'-c',
"import os, sys; "
r'os.write(sys.stdout.fileno(), b"\xff")'
],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True, universal_newlines=True
) as p:
with self.assertRaises(UnicodeDecodeError):
p.communicate()
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def test_universal1(self):
with subprocess.Popen(
[
sys.executable, "-c",
'import sys,os;' + SETBINARY +
'sys.stdout.write("line1\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line2\\r");'
'sys.stdout.flush();'
'sys.stdout.write("line3\\r\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line4\\r");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline5");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline6");'
],
stdout=subprocess.PIPE,
universal_newlines=1,
bufsize=1
) as p:
stdout = p.stdout.read()
if python_universal_newlines:
# Interpreter with universal newline support
if not python_universal_newlines_broken:
self.assertEqual(stdout,
"line1\nline2\nline3\nline4\nline5\nline6")
else:
# Note the extra newline after line 3
self.assertEqual(stdout,
'line1\nline2\nline3\n\nline4\n\nline5\nline6')
else:
# Interpreter without universal newline support
self.assertEqual(stdout,
"line1\nline2\rline3\r\nline4\r\nline5\nline6")
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def test_universal2(self):
with subprocess.Popen(
[
sys.executable, "-c",
'import sys,os;' + SETBINARY +
'sys.stdout.write("line1\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line2\\r");'
'sys.stdout.flush();'
'sys.stdout.write("line3\\r\\n");'
'sys.stdout.flush();'
'sys.stdout.write("line4\\r\\nline5");'
'sys.stdout.flush();'
'sys.stdout.write("\\nline6");'
],
stdout=subprocess.PIPE,
universal_newlines=1,
bufsize=1
) as p:
stdout = p.stdout.read()
if python_universal_newlines:
# Interpreter with universal newline support
if not python_universal_newlines_broken:
self.assertEqual(stdout,
"line1\nline2\nline3\nline4\nline5\nline6")
else:
# Note the extra newline after line 3
self.assertEqual(stdout,
'line1\nline2\nline3\n\nline4\n\nline5\nline6')
else:
# Interpreter without universal newline support
self.assertEqual(stdout,
"line1\nline2\rline3\r\nline4\r\nline5\nline6")
@greentest.skipOnWindows("Uses 'grep' command")
def test_nonblock_removed(self):
# see issue #134
r, w = os.pipe()
stdin = subprocess.FileObject(r)
with subprocess.Popen(['grep', 'text'], stdin=stdin) as p:
try:
# Closing one half of the pipe causes Python 3 on OS X to terminate the
# child process; it exits with code 1 and the assert that p.poll is None
# fails. Removing the close lets it pass under both Python 3 and 2.7.
# If subprocess.Popen._remove_nonblock_flag is changed to a noop, then
# the test fails (as expected) even with the close removed
#os.close(w)
time.sleep(0.1)
self.assertEqual(p.poll(), None)
finally:
if p.poll() is None:
p.kill()
stdin.close()
os.close(w)
def test_issue148(self):
for _ in range(7):
with self.assertRaises(OSError) as exc:
with subprocess.Popen('this_name_must_not_exist'):
pass
self.assertEqual(exc.exception.errno, errno.ENOENT)
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def test_check_output_keyword_error(self):
with self.assertRaises(subprocess.CalledProcessError) as exc: # pylint:disable=no-member
subprocess.check_output([sys.executable, '-c', 'import sys; sys.exit(44)'])
self.assertEqual(exc.exception.returncode, 44)
@greentest.skipOnPy3("The default buffer changed in Py3")
def test_popen_bufsize(self):
# Test that subprocess has unbuffered output by default
# (as the vanilla subprocess module)
with subprocess.Popen(
[sys.executable, '-u', '-c',
'import sys; sys.stdout.write(sys.stdin.readline())'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE
) as p:
p.stdin.write(b'foobar\n')
r = p.stdout.readline()
self.assertEqual(r, b'foobar\n')
@greentest.ignores_leakcheck
@greentest.skipOnWindows("Not sure why?")
def test_subprocess_in_native_thread(self):
# gevent.subprocess doesn't work from a background
# native thread. See #688
from gevent import monkey
# must be a native thread; defend against monkey-patching
ex = []
Thread = monkey.get_original('threading', 'Thread')
def fn():
with self.assertRaises(TypeError) as exc:
gevent.subprocess.Popen('echo 123', shell=True)
ex.append(exc.exception)
thread = Thread(target=fn)
thread.start()
thread.join()
self.assertEqual(len(ex), 1)
self.assertTrue(isinstance(ex[0], TypeError), ex)
self.assertEqual(ex[0].args[0], 'child watchers are only available on the default loop')
@greentest.skipOnLibuvOnPyPyOnWin("hangs")
def __test_no_output(self, kwargs, kind):
with subprocess.Popen(
[sys.executable, '-c', 'pass'],
stdout=subprocess.PIPE,
**kwargs
) as proc:
stdout, stderr = proc.communicate()
self.assertIsInstance(stdout, kind)
self.assertIsNone(stderr)
@greentest.skipOnLibuvOnCIOnPyPy("Sometimes segfaults; "
"https://travis-ci.org/gevent/gevent/jobs/327357682")
def test_universal_newlines_text_mode_no_output_is_always_str(self):
# If the file is in universal_newlines mode, we should always get a str when
# there is no output.
# https://github.com/gevent/gevent/pull/939
self.__test_no_output({'universal_newlines': True}, str)
@greentest.skipIf(sys.version_info[:2] < (3, 6), "Need encoding argument")
def test_encoded_text_mode_no_output_is_str(self):
# If the file is in universal_newlines mode, we should always get a str when
# there is no output.
# https://github.com/gevent/gevent/pull/939
self.__test_no_output({'encoding': 'utf-8'}, str)
def test_default_mode_no_output_is_always_str(self):
# If the file is in default mode, we should always get a str when
# there is no output.
# https://github.com/gevent/gevent/pull/939
self.__test_no_output({}, bytes)
@greentest.skipOnWindows("Testing POSIX fd closing")
|
TestPopen
|
python
|
doocs__leetcode
|
solution/2500-2599/2585.Number of Ways to Earn Points/Solution.py
|
{
"start": 0,
"end": 515
}
|
class ____:
def waysToReachTarget(self, target: int, types: List[List[int]]) -> int:
n = len(types)
mod = 10**9 + 7
f = [[0] * (target + 1) for _ in range(n + 1)]
f[0][0] = 1
for i in range(1, n + 1):
count, marks = types[i - 1]
for j in range(target + 1):
for k in range(count + 1):
if j >= k * marks:
f[i][j] = (f[i][j] + f[i - 1][j - k * marks]) % mod
return f[n][target]
|
Solution
|
python
|
django__django
|
tests/serializers/models/data.py
|
{
"start": 1252,
"end": 1331
}
|
class ____(models.Model):
    # Nullable file-path field; null=True permits rows with no path value
    # (presumably a serializer-test fixture model — confirm against callers).
    data = models.FilePathField(null=True)
FilePathData
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-beautiful-splits-in-an-array.py
|
{
"start": 984,
"end": 1636
}
|
class ____(object):
    def beautifulSplits(self, nums):
        """
        Count index pairs (i, j), 0 < i < j < len(nums), splitting nums into
        three parts such that either part 1 is a prefix of part 2, or part 2
        is a prefix of part 3.

        :type nums: List[int]
        :rtype: int
        """
        # dp[i][j] = length of the longest common prefix of the suffixes
        # starting at i and at j (filled right-to-left, Z-function style).
        dp = [[0]*len(nums) for _ in xrange(len(nums))]
        for i in reversed(xrange(len(nums))):
            for j in xrange(i+1, len(dp)):
                dp[i][j] = 1+(dp[i+1][j+1] if j+1 < len(nums) else 0) if nums[i] == nums[j] else 0
        result = 0
        for i in xrange(1, len(nums)-1):
            for j in xrange(i+1, len(nums)):
                # First clause: nums[0:i] is a prefix of nums[i:j]
                # (needs LCP >= i and the middle part at least i long).
                # Second clause: nums[i:j] is a prefix of nums[j:].
                if (dp[0][i] >= i and j-i >= i) or dp[i][j] >= j-i:
                    result += 1
        return result
# Time: O(n^2)
# Space: O(n)
# z-function
|
Solution2
|
python
|
python__mypy
|
mypy/inspections.py
|
{
"start": 6147,
"end": 23813
}
|
class ____:
    """Engine for locating and statically inspecting expressions."""

    def __init__(
        self,
        fg_manager: FineGrainedBuildManager,
        *,
        verbosity: int = 0,
        limit: int = 0,
        include_span: bool = False,
        include_kind: bool = False,
        include_object_attrs: bool = False,
        union_attrs: bool = False,
        force_reload: bool = False,
    ) -> None:
        # verbosity controls short vs fully-qualified names; limit caps the
        # number of results; the include_* flags enrich the output format.
        self.fg_manager = fg_manager
        self.verbosity = verbosity
        self.limit = limit
        self.include_span = include_span
        self.include_kind = include_kind
        self.include_object_attrs = include_object_attrs
        self.union_attrs = union_attrs
        self.force_reload = force_reload
        # Module for which inspection was requested.
        self.module: State | None = None

    def reload_module(self, state: State) -> None:
        """Reload given module while temporary exporting types."""
        old = self.fg_manager.manager.options.export_types
        self.fg_manager.manager.options.export_types = True
        try:
            self.fg_manager.flush_cache()
            assert state.path is not None
            self.fg_manager.update([(state.id, state.path)], [])
        finally:
            # Always restore the caller's export_types setting.
            self.fg_manager.manager.options.export_types = old

    def expr_type(self, expression: Expression) -> tuple[str, bool]:
        """Format type for an expression using current options.

        If type is known, second item returned is True. If type is not known, an error
        message is returned instead, and second item returned is False.
        """
        expr_type = self.fg_manager.manager.all_types.get(expression)
        if expr_type is None:
            return self.missing_type(expression), False

        type_str = format_type(
            expr_type, self.fg_manager.manager.options, verbosity=self.verbosity
        )
        return self.add_prefixes(type_str, expression), True

    def object_type(self) -> Instance:
        """Return an Instance for builtins.object (universal fallback type)."""
        builtins = self.fg_manager.graph["builtins"].tree
        assert builtins is not None
        object_node = builtins.names["object"].node
        assert isinstance(object_node, TypeInfo)
        return Instance(object_node, [])

    def collect_attrs(self, instances: list[Instance]) -> dict[TypeInfo, list[str]]:
        """Collect attributes from all union/typevar variants."""

        def item_attrs(attr_dict: dict[TypeInfo, list[str]]) -> set[str]:
            # Flatten one variant's per-base attribute lists into a set.
            attrs = set()
            for base in attr_dict:
                attrs |= set(attr_dict[base])
            return attrs

        def cmp_types(x: TypeInfo, y: TypeInfo) -> int:
            # Order subclasses before their bases; unrelated types tie.
            if x in y.mro:
                return 1
            if y in x.mro:
                return -1
            return 0

        # First gather all attributes for every union variant.
        assert instances
        all_attrs = []
        for instance in instances:
            attrs = {}
            mro = instance.type.mro
            if not self.include_object_attrs:
                mro = mro[:-1]
            for base in mro:
                attrs[base] = sorted(base.names)
            all_attrs.append(attrs)

        # Find attributes valid for all variants in a union or type variable.
        intersection = item_attrs(all_attrs[0])
        for item in all_attrs[1:]:
            intersection &= item_attrs(item)

        # Combine attributes from all variants into a single dict while
        # also removing invalid attributes (unless using --union-attrs).
        combined_attrs = defaultdict(list)
        for item in all_attrs:
            for base in item:
                if base in combined_attrs:
                    continue
                for name in item[base]:
                    if self.union_attrs or name in intersection:
                        combined_attrs[base].append(name)

        # Sort bases by MRO, unrelated will appear in the order they appeared as union variants.
        sorted_bases = sorted(combined_attrs.keys(), key=cmp_to_key(cmp_types))
        result = {}
        for base in sorted_bases:
            if not combined_attrs[base]:
                # Skip bases where everything was filtered out.
                continue
            result[base] = combined_attrs[base]
        return result

    def _fill_from_dict(
        self, attrs_strs: list[str], attrs_dict: dict[TypeInfo, list[str]]
    ) -> None:
        # Render each base as a JSON-ish '"Cls": ["a", "b"]' entry.
        for base in attrs_dict:
            cls_name = base.name if self.verbosity < 1 else base.fullname
            attrs = [f'"{attr}"' for attr in attrs_dict[base]]
            attrs_strs.append(f'"{cls_name}": [{", ".join(attrs)}]')

    def expr_attrs(self, expression: Expression) -> tuple[str, bool]:
        """Format attributes that are valid for a given expression.

        If expression type is not an Instance, try using fallback. Attributes are
        returned as a JSON (ordered by MRO) that maps base class name to list of
        attributes. Attributes may appear in multiple bases if overridden (we simply
        follow usual mypy logic for creating new Vars etc).
        """
        expr_type = self.fg_manager.manager.all_types.get(expression)
        if expr_type is None:
            return self.missing_type(expression), False

        expr_type = get_proper_type(expr_type)
        instances = get_instance_fallback(expr_type)
        if not instances:
            # Everything is an object in Python.
            instances = [self.object_type()]

        attrs_dict = self.collect_attrs(instances)

        # Special case: modules have names apart from those from ModuleType.
        if isinstance(expression, RefExpr) and isinstance(expression.node, MypyFile):
            node = expression.node
            names = sorted(node.names)
            if "__builtins__" in names:
                # This is just to make tests stable. No one will really need this name.
                names.remove("__builtins__")
            mod_dict = {f'"<{node.fullname}>"': [f'"{name}"' for name in names]}
        else:
            mod_dict = {}

        # Special case: for class callables, prepend with the class attributes.
        # TODO: also handle cases when such callable appears in a union.
        if isinstance(expr_type, FunctionLike) and expr_type.is_type_obj():
            template = fill_typevars_with_any(expr_type.type_object())
            class_dict = self.collect_attrs(get_instance_fallback(template))
        else:
            class_dict = {}

        # We don't use JSON dump to be sure keys order is always preserved.
        base_attrs = []
        if mod_dict:
            for mod in mod_dict:
                base_attrs.append(f'{mod}: [{", ".join(mod_dict[mod])}]')
        self._fill_from_dict(base_attrs, class_dict)
        self._fill_from_dict(base_attrs, attrs_dict)
        return self.add_prefixes(f'{{{", ".join(base_attrs)}}}', expression), True

    def format_node(self, module: State, node: FuncBase | SymbolNode) -> str:
        # "path:line:column:name" (column is converted to 1-based).
        return f"{module.path}:{node.line}:{node.column + 1}:{node.name}"

    def collect_nodes(self, expression: RefExpr) -> list[FuncBase | SymbolNode]:
        """Collect nodes that can be referred to by an expression.

        Note: it can be more than one for example in case of a union attribute.
        """
        node: FuncBase | SymbolNode | None = expression.node
        nodes: list[FuncBase | SymbolNode]
        if node is None:
            # Tricky case: instance attribute
            if isinstance(expression, MemberExpr) and expression.kind is None:
                base_type = self.fg_manager.manager.all_types.get(expression.expr)
                if base_type is None:
                    return []

                # Now we use the base type to figure out where the attribute is defined.
                base_type = get_proper_type(base_type)
                instances = get_instance_fallback(base_type)
                nodes = []
                for instance in instances:
                    node = find_node(expression.name, instance.type)
                    if node:
                        nodes.append(node)
                if not nodes:
                    # Try checking class namespace if attribute is on a class object.
                    if isinstance(base_type, FunctionLike) and base_type.is_type_obj():
                        instances = get_instance_fallback(
                            fill_typevars_with_any(base_type.type_object())
                        )
                        for instance in instances:
                            node = find_node(expression.name, instance.type)
                            if node:
                                nodes.append(node)
                    else:
                        # Still no luck, give up.
                        return []
            else:
                return []
        else:
            # Easy case: a module-level definition
            nodes = [node]
        return nodes

    def modules_for_nodes(
        self, nodes: list[FuncBase | SymbolNode], expression: RefExpr
    ) -> tuple[dict[FuncBase | SymbolNode, State], bool]:
        """Gather modules where given nodes were defined.

        Also check if they need to be refreshed (cached nodes may have
        lines/columns missing).
        """
        modules = {}
        reload_needed = False
        for node in nodes:
            module = find_module_by_fullname(node.fullname, self.fg_manager.graph)
            if not module:
                if expression.kind == LDEF and self.module:
                    # Local definitions live in the module being inspected.
                    module = self.module
                else:
                    continue
            modules[node] = module
            if not module.tree or module.tree.is_cache_skeleton or self.force_reload:
                reload_needed |= not module.tree or module.tree.is_cache_skeleton
                self.reload_module(module)
        return modules, reload_needed

    def expression_def(self, expression: Expression) -> tuple[str, bool]:
        """Find and format definition location for an expression.

        If it is not a RefExpr, it is effectively skipped by returning an
        empty result.
        """
        if not isinstance(expression, RefExpr):
            # If there are no suitable matches at all, we return error later.
            return "", True
        nodes = self.collect_nodes(expression)

        if not nodes:
            return self.missing_node(expression), False

        modules, reload_needed = self.modules_for_nodes(nodes, expression)
        if reload_needed:
            # TODO: line/column are not stored in cache for vast majority of symbol nodes.
            # Adding them will make things faster, but will have visible memory impact.
            nodes = self.collect_nodes(expression)
            modules, reload_needed = self.modules_for_nodes(nodes, expression)
            assert not reload_needed

        result = []
        for node in modules:
            result.append(self.format_node(modules[node], node))

        if not result:
            return self.missing_node(expression), False

        return self.add_prefixes(", ".join(result), expression), True

    def missing_type(self, expression: Expression) -> str:
        # Error text for an expression that has no recorded type.
        alt_suggestion = ""
        if not self.force_reload:
            alt_suggestion = " or try --force-reload"
        return (
            f'No known type available for "{type(expression).__name__}"'
            f" (maybe unreachable{alt_suggestion})"
        )

    def missing_node(self, expression: Expression) -> str:
        # Error text when a definition could not be resolved.
        return (
            f'Cannot find definition for "{type(expression).__name__}" at {expr_span(expression)}'
        )

    def add_prefixes(self, result: str, expression: Expression) -> str:
        """Prepend optional kind/span prefixes to a formatted result."""
        prefixes = []
        if self.include_kind:
            prefixes.append(f"{type(expression).__name__}")
        if self.include_span:
            prefixes.append(expr_span(expression))
        if prefixes:
            prefix = ":".join(prefixes) + " -> "
        else:
            prefix = ""
        return prefix + result

    def run_inspection_by_exact_location(
        self,
        tree: MypyFile,
        line: int,
        column: int,
        end_line: int,
        end_column: int,
        method: Callable[[Expression], tuple[str, bool]],
    ) -> dict[str, object]:
        """Get type of an expression matching a span.

        Type or error is returned as a standard daemon response dict.
        """
        try:
            expression = find_by_location(tree, line, column - 1, end_line, end_column)
        except ValueError as err:
            return {"error": str(err)}

        if expression is None:
            span = f"{line}:{column}:{end_line}:{end_column}"
            return {"out": f"Can't find expression at span {span}", "err": "", "status": 1}

        inspection_str, success = method(expression)
        return {"out": inspection_str, "err": "", "status": 0 if success else 1}

    def run_inspection_by_position(
        self,
        tree: MypyFile,
        line: int,
        column: int,
        method: Callable[[Expression], tuple[str, bool]],
    ) -> dict[str, object]:
        """Get types of all expressions enclosing a position.

        Types and/or errors are returned as a standard daemon response dict.
        """
        expressions = find_all_by_location(tree, line, column - 1)
        if not expressions:
            position = f"{line}:{column}"
            return {
                "out": f"Can't find any expressions at position {position}",
                "err": "",
                "status": 1,
            }

        inspection_strs = []
        status = 0
        for expression in expressions:
            inspection_str, success = method(expression)
            if not success:
                status = 1
            if inspection_str:
                inspection_strs.append(inspection_str)
        if self.limit:
            # Truncate to the innermost `limit` expressions.
            inspection_strs = inspection_strs[: self.limit]
        return {"out": "\n".join(inspection_strs), "err": "", "status": status}

    def find_module(self, file: str) -> tuple[State | None, dict[str, object]]:
        """Find module by path, or return a suitable error message.

        Note we don't use exceptions to simplify handling 1 vs 2 statuses.
        """
        if not any(file.endswith(ext) for ext in PYTHON_EXTENSIONS):
            return None, {"error": "Source file is not a Python file"}

        # We are using a bit slower but robust way to find a module by path,
        # to be sure that namespace packages are handled properly.
        abs_path = os.path.abspath(file)
        state = next((s for s in self.fg_manager.graph.values() if s.abspath == abs_path), None)
        self.module = state
        return (
            state,
            {"out": f"Unknown module: {file}", "err": "", "status": 1} if state is None else {},
        )

    def run_inspection(
        self, location: str, method: Callable[[Expression], tuple[str, bool]]
    ) -> dict[str, object]:
        """Top-level logic to inspect expression(s) at a location.

        This can be reused by various simple inspections.
        """
        try:
            file, pos = parse_location(location)
        except ValueError as err:
            return {"error": str(err)}

        state, err_dict = self.find_module(file)
        if state is None:
            assert err_dict
            return err_dict

        # Force reloading to load from cache, account for any edits, etc.
        if not state.tree or state.tree.is_cache_skeleton or self.force_reload:
            self.reload_module(state)
        assert state.tree is not None

        if len(pos) == 4:
            # Full span, return an exact match only.
            line, column, end_line, end_column = pos
            return self.run_inspection_by_exact_location(
                state.tree, line, column, end_line, end_column, method
            )
        assert len(pos) == 2
        # Inexact location, return all expressions.
        line, column = pos
        return self.run_inspection_by_position(state.tree, line, column, method)

    def get_type(self, location: str) -> dict[str, object]:
        """Get types of expression(s) at a location."""
        return self.run_inspection(location, self.expr_type)

    def get_attrs(self, location: str) -> dict[str, object]:
        """Get attributes of expression(s) at a location."""
        return self.run_inspection(location, self.expr_attrs)

    def get_definition(self, location: str) -> dict[str, object]:
        """Get symbol definitions of expression(s) at a location."""
        result = self.run_inspection(location, self.expression_def)
        if "out" in result and not result["out"]:
            # None of the expressions found turns out to be a RefExpr.
            _, location = location.split(":", maxsplit=1)
            result["out"] = f"No name or member expressions at {location}"
            result["status"] = 1
        return result
def parse_location(location: str) -> tuple[str, list[int]]:
if location.count(":") < 2:
raise ValueError("Format should be file:line:column[:end_line:end_column]")
parts = location.rsplit(":", maxsplit=2)
start, *rest = parts
# Note: we must allow drive prefix like `C:` on Windows.
if start.count(":") < 2:
return start, [int(p) for p in rest]
parts = start.rsplit(":", maxsplit=2)
start, *start_rest = parts
if start.count(":") < 2:
return start, [int(p) for p in start_rest + rest]
raise ValueError("Format should be file:line:column[:end_line:end_column]")
|
InspectionEngine
|
python
|
numpy__numpy
|
numpy/matrixlib/tests/test_matrix_linalg.py
|
{
"start": 1598,
"end": 1658
}
|
class ____(PinvCases, MatrixTestCase):
pass
|
TestPinvMatrix
|
python
|
getsentry__sentry
|
src/sentry/integrations/jira_server/integration.py
|
{
"start": 11227,
"end": 53014
}
|
class ____(IssueSyncIntegration):
"""
IntegrationInstallation implementation for Jira-Server
"""
comment_key = "sync_comments"
outbound_status_key = "sync_status_forward"
inbound_status_key = "sync_status_reverse"
outbound_assignee_key = "sync_forward_assignment"
inbound_assignee_key = "sync_reverse_assignment"
issues_ignored_fields_key = "issues_ignored_fields"
resolution_strategy_key = "resolution_strategy"
def get_client(self):
try:
return JiraServerClient(integration=self.model, identity=self.default_identity)
except Identity.DoesNotExist:
raise IntegrationError("Identity not found.")
def get_organization_config(self):
configuration: list[_Config] = [
{
"name": self.outbound_status_key,
"type": "choice_mapper",
"label": _("Sync Sentry Status to Jira Server"),
"help": _(
"When a Sentry issue changes status, change the status of the linked ticket in Jira Server."
),
"addButtonText": _("Add Jira Server Project"),
"addDropdown": {
"emptyMessage": _("All projects configured"),
"noResultsMessage": _("Could not find Jira Server project"),
"items": [], # Populated with projects
},
"mappedSelectors": {
"on_resolve": {"choices": [], "placeholder": _("Select a status")},
"on_unresolve": {"choices": [], "placeholder": _("Select a status")},
},
"columnLabels": {
"on_resolve": _("When resolved"),
"on_unresolve": _("When unresolved"),
},
"mappedColumnLabel": _("Jira Server Project"),
"formatMessageValue": False,
},
{
"name": self.outbound_assignee_key,
"type": "boolean",
"label": _("Sync Sentry Assignment to Jira Server"),
"help": _(
"When an issue is assigned in Sentry, assign its linked Jira Server ticket to the same user."
),
},
{
"name": self.comment_key,
"type": "boolean",
"label": _("Sync Sentry Comments to Jira Server"),
"help": _("Post comments from Sentry issues to linked Jira Server tickets"),
},
{
"name": self.inbound_status_key,
"type": "boolean",
"label": _("Sync Jira Server Status to Sentry"),
"help": _(
"When a Jira Server ticket is marked done, resolve its linked issue in Sentry. "
"When a Jira Server ticket is removed from being done, unresolve its linked Sentry issue."
),
},
{
"name": self.inbound_assignee_key,
"type": "boolean",
"label": _("Sync Jira Server Assignment to Sentry"),
"help": _(
"When a ticket is assigned in Jira Server, assign its linked Sentry issue to the same user."
),
},
{
"name": self.resolution_strategy_key,
"label": "Resolve",
"type": "select",
"placeholder": "Resolve",
"choices": [
("resolve", "Resolve"),
("resolve_current_release", "Resolve in Current Release"),
("resolve_next_release", "Resolve in Next Release"),
],
"help": _(
"Select what action to take on Sentry Issue when Jira ticket is marked Done."
),
},
{
"name": self.issues_ignored_fields_key,
"label": "Ignored Fields",
"type": "textarea",
"placeholder": _("components, security, customfield_10006"),
"help": _("Comma-separated Jira field IDs that you want to hide."),
},
]
client = self.get_client()
try:
statuses = [(c["id"], c["name"]) for c in client.get_valid_statuses()]
configuration[0]["mappedSelectors"]["on_resolve"]["choices"] = statuses
configuration[0]["mappedSelectors"]["on_unresolve"]["choices"] = statuses
projects: list[_Project] = [
{"value": p["id"], "label": p["name"]} for p in client.get_projects_list()
]
configuration[0]["addDropdown"]["items"] = projects
except ApiError:
configuration[0]["disabled"] = True
configuration[0]["disabledReason"] = _(
"Unable to communicate with the Jira instance. You may need to reinstall the addon."
)
context = organization_service.get_organization_by_id(
id=self.organization_id, include_teams=False, include_projects=False
)
if context is not None:
organization = context.organization
has_issue_sync = features.has("organizations:integrations-issue-sync", organization)
else:
has_issue_sync = False
if not has_issue_sync:
for field in configuration:
field["disabled"] = True
field["disabledReason"] = _(
"Your organization does not have access to this feature"
)
return configuration
def update_organization_config(self, data):
"""
Update the configuration field for an organization integration.
"""
config = self.org_integration.config
if "sync_status_forward" in data:
project_mappings = data.pop("sync_status_forward")
if any(
not mapping["on_unresolve"] or not mapping["on_resolve"]
for mapping in project_mappings.values()
):
raise IntegrationError("Resolve and unresolve status are required.")
data["sync_status_forward"] = bool(project_mappings)
IntegrationExternalProject.objects.filter(
organization_integration_id=self.org_integration.id
).delete()
for project_id, statuses in project_mappings.items():
IntegrationExternalProject.objects.create(
organization_integration_id=self.org_integration.id,
external_id=project_id,
resolved_status=statuses["on_resolve"],
unresolved_status=statuses["on_unresolve"],
)
if self.issues_ignored_fields_key in data:
ignored_fields_text = data.pop(self.issues_ignored_fields_key)
# While we describe the config as a "comma-separated list", users are likely to
# accidentally use newlines, so we explicitly handle that case. On page
# refresh, they will see how it got interpreted as `get_config_data` will
# re-serialize the config as a comma-separated list.
ignored_fields_list = list(
filter(
None, [field.strip() for field in re.split(r"[,\n\r]+", ignored_fields_text)]
)
)
data[self.issues_ignored_fields_key] = ignored_fields_list
config.update(data)
org_integration = integration_service.update_organization_integration(
org_integration_id=self.org_integration.id,
config=config,
)
if org_integration is not None:
self.org_integration = org_integration
def get_config_data(self):
config = self.org_integration.config
project_mappings = integration_service.get_integration_external_projects(
organization_id=self.org_integration.organization_id,
integration_id=self.org_integration.integration_id,
)
sync_status_forward = {}
for pm in project_mappings:
sync_status_forward[pm.external_id] = {
"on_unresolve": pm.unresolved_status,
"on_resolve": pm.resolved_status,
}
config["sync_status_forward"] = sync_status_forward
config[self.issues_ignored_fields_key] = ", ".join(
config.get(self.issues_ignored_fields_key, "")
)
return config
def sync_metadata(self) -> None:
client = self.get_client()
try:
server_info = client.get_server_info()
projects = client.get_projects_list()
except ApiError as e:
raise IntegrationError(self.message_from_error(e))
self.model.name = server_info["serverTitle"]
# There is no Jira instance icon (there is a favicon, but it doesn't seem
# possible to query that with the API). So instead we just use the first
# project Icon.
if len(projects) > 0:
avatar = projects[0]["avatarUrls"]["48x48"]
self.model.metadata.update({"icon": avatar})
integration_service.update_integration(
integration_id=self.model.id,
name=self.model.name,
metadata=self.model.metadata,
)
def get_link_issue_config(self, group, **kwargs):
fields = super().get_link_issue_config(group, **kwargs)
org = group.organization
autocomplete_url = reverse(
"sentry-extensions-jiraserver-search", args=[org.slug, self.model.id]
)
for field in fields:
if field["name"] == "externalIssue":
field["url"] = autocomplete_url
field["type"] = "select"
default_comment = "Linked Sentry Issue: [{}|{}]".format(
group.qualified_short_id,
absolute_uri(
group.get_absolute_url(
params={"referrer": IntegrationProviderSlug.JIRA_SERVER.value}
)
),
)
fields.append(
{
"name": "comment",
"label": "Comment",
"default": default_comment,
"type": "textarea",
"autosize": True,
"maxRows": 10,
}
)
return fields
def get_issue_url(self, key: str) -> str:
return "{}/browse/{}".format(self.model.metadata["base_url"], key)
def get_persisted_default_config_fields(self) -> Sequence[str]:
return ["project", "issuetype", "priority", "labels"]
def get_persisted_user_default_config_fields(self):
return ["reporter"]
def get_persisted_ignored_fields(self):
return self.org_integration.config.get(self.issues_ignored_fields_key, [])
def get_group_description(self, group, event, **kwargs):
output = [
"Sentry Issue: [{}|{}]".format(
group.qualified_short_id,
absolute_uri(group.get_absolute_url(params={"referrer": "jira_integration"})),
)
]
body = self.get_group_body(group, event)
if body:
output.extend(["", "{code}", body, "{code}"])
return "\n".join(output)
def get_issue(self, issue_id, **kwargs):
"""
Jira installation's implementation of IssueSyncIntegration's `get_issue`.
"""
client = self.get_client()
issue = client.get_issue(issue_id)
fields = issue.get("fields", {})
return {
"key": issue_id,
"title": fields.get("summary"),
"description": fields.get("description"),
}
def create_comment(self, issue_id, user_id, group_note):
# https://jira.atlassian.com/secure/WikiRendererHelpAction.jspa?section=texteffects
comment = group_note.data["text"]
quoted_comment = self.create_comment_attribution(user_id, comment)
return self.get_client().create_comment(issue_id, quoted_comment)
def create_comment_attribution(self, user_id: int, comment_text: str) -> str:
user = user_service.get_user(user_id=user_id)
assert user is not None
attribution = f"{user.name} wrote:\n\n"
return f"{attribution}{{quote}}{comment_text}{{quote}}"
def update_comment(self, issue_id, user_id, group_note):
quoted_comment = self.create_comment_attribution(user_id, group_note.data["text"])
return self.get_client().update_comment(
issue_id, group_note.data["external_id"], quoted_comment
)
def search_issues(self, query: str | None, **kwargs) -> dict[str, Any]:
try:
resp = self.get_client().search_issues(query)
assert isinstance(resp, dict)
return resp
except ApiError as e:
self.raise_error(e)
def make_choices(self, values):
if not values:
return []
results = []
for item in values:
key = item.get("id", None)
if "name" in item:
value = item["name"]
elif "value" in item:
# Value based options prefer the value on submit.
key = item["value"]
value = item["value"]
elif "label" in item:
# Label based options prefer the value on submit.
key = item["label"]
value = item["label"]
else:
continue
results.append((key, value))
return results
def error_message_from_json(self, data):
message = ""
if data.get("errorMessages"):
message = " ".join(data["errorMessages"])
if data.get("errors"):
if message:
message += " "
message += " ".join(f"{k}: {v}" for k, v in data.get("errors").items())
return message
def error_fields_from_json(self, data):
errors = data.get("errors")
if not errors:
return None
return {key: [error] for key, error in data.get("errors").items()}
def search_url(self, org_slug):
return reverse("sentry-extensions-jiraserver-search", args=[org_slug, self.model.id])
def build_dynamic_field(self, field_meta, group=None):
"""
Builds a field based on Jira's meta field information
"""
schema = field_meta["schema"]
# set up some defaults for form fields
fieldtype = "text"
fkwargs = {"label": field_meta["name"], "required": field_meta["required"]}
# override defaults based on field configuration
if (
schema["type"] in ["securitylevel", "priority"]
or schema.get("custom") == JIRA_CUSTOM_FIELD_TYPES["select"]
):
fieldtype = "select"
fkwargs["choices"] = self.make_choices(field_meta.get("allowedValues"))
elif (
# Assignee and reporter fields
field_meta.get("autoCompleteUrl")
and (schema.get("items") == "user" or schema["type"] == "user")
# Sprint and "Epic Link" fields
or schema.get("custom")
in (JIRA_CUSTOM_FIELD_TYPES["sprint"], JIRA_CUSTOM_FIELD_TYPES["epic"])
# Parent field
or schema["type"] == "issuelink"
):
fieldtype = "select"
if group is not None:
organization = group.organization
else:
ctx = organization_service.get_organization_by_id(
id=self.organization_id, include_teams=False, include_projects=False
)
assert ctx is not None
organization = ctx.organization
fkwargs["url"] = self.search_url(organization.slug)
fkwargs["choices"] = []
elif schema["type"] in ["timetracking"]:
# TODO: Implement timetracking (currently unsupported altogether)
return None
elif schema.get("items") in ["worklog", "attachment"]:
# TODO: Implement worklogs and attachments someday
return None
elif schema["type"] == "array" and schema["items"] != "string":
fieldtype = "select"
fkwargs.update(
{
"multiple": True,
"choices": self.make_choices(field_meta.get("allowedValues")),
"default": "",
}
)
elif schema["type"] == "option" and len(field_meta.get("allowedValues", [])):
fieldtype = "select"
fkwargs.update(
{"choices": self.make_choices(field_meta.get("allowedValues")), "default": ""}
)
# break this out, since multiple field types could additionally
# be configured to use a custom property instead of a default.
if schema.get("custom"):
if schema["custom"] == JIRA_CUSTOM_FIELD_TYPES["textarea"]:
fieldtype = "textarea"
fkwargs["type"] = fieldtype
return fkwargs
def get_projects(self, cached=True):
client = self.get_client()
no_projects_error_message = "Could not fetch project list from Jira Server. Ensure that Jira Server is available and your account is still active."
try:
jira_projects = client.get_projects_list(cached)
except ApiError as e:
logger.info(
"jira_server.get_projects.error",
extra={
"integration_id": self.model.id,
"organization_id": self.organization_id,
"error": str(e),
},
)
raise IntegrationError(no_projects_error_message)
if len(jira_projects) == 0:
logger.info(
"jira_server.get_projects.no_projects",
extra={
"integration_id": self.model.id,
"organization_id": self.organization_id,
},
)
raise IntegrationError(no_projects_error_message)
return jira_projects
@all_silo_function
def get_create_issue_config(self, group: Group | None, user: User | RpcUser, **kwargs):
"""
We use the `group` to get three things: organization_slug, project
defaults, and default title and description. In the case where we're
getting `createIssueConfig` from Jira for Ticket Rules, we don't know
the issue group beforehand.
:param group: (Optional) Group model.
:param user: User model. TODO Make this the first parameter.
:param kwargs: (Optional) Object
* params: (Optional) Object
* params.project: (Optional) Sentry Project object
* params.issuetype: (Optional) String. The Jira issue type. For
example: "Bug", "Epic", "Story".
:return:
"""
kwargs = kwargs or {}
kwargs["link_referrer"] = "jira_server_integration"
params = kwargs.get("params", {})
fields = []
defaults = {}
if group:
fields = super().get_create_issue_config(group, user, **kwargs)
defaults = self.get_defaults(group.project, user)
project_id = params.get("project", defaults.get("project"))
jira_projects = self.get_projects()
if not project_id:
project_id = jira_projects[0]["id"]
logger.info(
"get_create_issue_config.start",
extra={
"organization_id": self.organization_id,
"integration_id": self.model.id,
"num_jira_projects": len(jira_projects),
"project_id": project_id,
},
)
client = self.get_client()
project_field = {
"name": "project",
"label": "Jira Project",
"choices": [(p["id"], p["key"]) for p in jira_projects],
"default": project_id,
"type": "select",
"updatesForm": True,
}
try:
issue_type_choices = client.get_issue_types(project_id)
except ApiError as e:
logger.info(
"get_create_issue_config.get_issue_types.error",
extra={
"organization_id": self.organization_id,
"integration_id": self.model.id,
"num_jira_projects": len(jira_projects),
"project_id": project_id,
"error_message": str(e),
},
)
# return a form with just the project selector and a special form field to show the error
return [
project_field,
{
"name": "error",
"type": "blank",
"help": "Could not fetch issue creation metadata from Jira Server. Ensure that"
" the integration user has access to the requested project.",
},
]
issue_type_choices_formatted = [
(choice["id"], choice["name"]) for choice in issue_type_choices["values"]
]
# check if the issuetype was passed as a parameter
issue_type = params.get("issuetype", defaults.get("issuetype"))
# make sure default issue type is actually one that is allowed for project
valid_issue_type = any(
choice for choice in issue_type_choices["values"] if choice["id"] == issue_type
)
if not issue_type or not valid_issue_type:
# pick the first issue type in the list
issue_type = issue_type_choices["values"][0]["id"]
try:
issue_type_meta = client.get_issue_fields(project_id, issue_type)
except ApiUnauthorized:
logger.info(
"jira_server.get_create_issue_config.unauthorized",
extra={"organization_id": self.organization_id, "jira_project": project_id},
)
raise IntegrationError(
"Could not fetch issue creation metadata from Jira Server. Ensure that"
" the integration user has access to the requested project."
)
fields = [
project_field,
*fields,
{
"name": "issuetype",
"label": "Issue Type",
"default": issue_type or issue_type_meta["id"],
"type": "select",
"choices": issue_type_choices_formatted,
"updatesForm": True,
"required": bool(
issue_type_choices_formatted
), # required if we have any type choices
},
]
# title is renamed to summary before sending to Jira
standard_fields = [f["name"] for f in fields] + ["summary"]
ignored_fields = set()
ignored_fields.update(HIDDEN_ISSUE_FIELDS)
ignored_fields.update(self.get_persisted_ignored_fields())
# apply ordering to fields based on some known built-in Jira fields.
# otherwise weird ordering occurs.
anti_gravity = {
"priority": (-150, ""),
"fixVersions": (-125, ""),
"components": (-100, ""),
"security": (-50, ""),
}
dynamic_fields = [val["fieldId"] for val in issue_type_meta["values"]]
# Sort based on priority, then field name
dynamic_fields.sort(key=lambda f: anti_gravity.get(f, (0, f)))
# Build up some dynamic fields based on what is required.
for field in dynamic_fields:
if field in standard_fields or field in [x.strip() for x in ignored_fields]:
# don't overwrite the fixed fields for the form.
continue
field_meta = [value for value in issue_type_meta["values"] if value["fieldId"] == field]
if len(field_meta) > 0:
mb_field = self.build_dynamic_field(field_meta[0], group)
if mb_field:
if mb_field["label"] in params.get("ignored", []):
continue
mb_field["name"] = field
fields.append(mb_field)
for field in fields:
if field["name"] == "priority":
# whenever priorities are available, put the available ones in the list.
# allowedValues for some reason doesn't pass enough info.
field["choices"] = self.make_choices(client.get_priorities())
field["default"] = defaults.get("priority", "")
elif field["name"] == "fixVersions":
field["choices"] = self.make_choices(client.get_versions(project_id))
elif field["name"] == "labels":
field["default"] = defaults.get("labels", "")
elif field["name"] == "reporter":
reporter_id = defaults.get("reporter", "")
if not reporter_id:
continue
try:
reporter_info = client.get_user(reporter_id)
except ApiError as e:
logger.info(
"jira_server.get_create_issue_config.no-matching-reporter",
extra={
"integration_id": self.model.id,
"organization_id": self.organization_id,
"persisted_reporter_id": reporter_id,
"error": str(e),
},
)
continue
reporter_tuple = build_user_choice(reporter_info, client.user_id_field())
if not reporter_tuple:
continue
reporter_id, reporter_label = reporter_tuple
field["default"] = reporter_id
field["choices"] = [(reporter_id, reporter_label)]
return fields
def create_issue(self, data, **kwargs):
"""
Get the (cached) "createmeta" from Jira to use as a "schema". Clean up
the Jira issue by removing all fields that aren't enumerated by this
schema. Send this cleaned data to Jira. Finally, make another API call
to Jira to make sure the issue was created and return basic issue details.
:param data: JiraServerCreateTicketAction object
:param kwargs: not used
:return: simple object with basic Jira issue details
"""
client = self.get_client()
cleaned_data = {}
# protect against mis-configured integration submitting a form without an
# issuetype assigned.
issue_type = data.get("issuetype")
if not issue_type:
raise IntegrationFormError({"issuetype": ["Issue type is required."]})
jira_project = data.get("project")
if not jira_project:
raise IntegrationFormError({"project": ["Jira project is required"]})
issue_type_meta = client.get_issue_fields(jira_project, issue_type)
if not issue_type_meta:
raise IntegrationConfigurationError(
"Could not fetch issue create configuration from Jira."
)
user_id_field = client.user_id_field()
issue_type_fields = issue_type_meta["values"]
for field in issue_type_fields:
field_name = field["fieldId"]
if field_name == "description":
cleaned_data[field_name] = data[field_name]
continue
elif field_name == "summary":
title = data.get("title")
cleaned_data["summary"] = title[:255] if title else None
continue
elif field_name == "labels" and "labels" in data:
labels = [label.strip() for label in data["labels"].split(",") if label.strip()]
cleaned_data["labels"] = labels
continue
if field_name in data.keys():
v = data.get(field_name)
if not v:
continue
schema = field.get("schema")
if schema:
if schema.get("type") == "string" and not schema.get("custom"):
cleaned_data[field_name] = v
continue
if schema["type"] == "user" or schema.get("items") == "user":
if schema.get("custom") == JIRA_CUSTOM_FIELD_TYPES.get("multiuserpicker"):
# custom multi-picker
v = [{user_id_field: user_id} for user_id in v]
else:
v = {user_id_field: v}
elif schema["type"] == "issuelink": # used by Parent field
v = {"key": v}
elif schema.get("custom") == JIRA_CUSTOM_FIELD_TYPES["epic"]:
v = v
elif schema.get("custom") == JIRA_CUSTOM_FIELD_TYPES["sprint"]:
try:
v = int(v)
except ValueError:
raise IntegrationError(f"Invalid sprint ({v}) specified")
elif schema["type"] == "array" and schema.get("items") == "option":
v = [{"value": vx} for vx in v]
elif schema["type"] == "array" and schema.get("items") == "string":
v = [v]
elif schema["type"] == "array" and schema.get("items") != "string":
v = [{"id": vx} for vx in v]
elif schema["type"] == "option":
v = {"value": v}
elif schema.get("custom") == JIRA_CUSTOM_FIELD_TYPES.get("textarea"):
v = v
elif (
schema["type"] == "number"
or schema.get("custom") == JIRA_CUSTOM_FIELD_TYPES["tempo_account"]
):
try:
if "." in v:
v = float(v)
else:
v = int(v)
except ValueError:
pass
elif (
schema.get("type") != "string"
or (schema.get("items") and schema.get("items") != "string")
or schema.get("custom") == JIRA_CUSTOM_FIELD_TYPES.get("select")
):
v = {"id": v}
cleaned_data[field_name] = v
if not (
isinstance(cleaned_data.get("issuetype"), dict)
and "id" in cleaned_data.get("issuetype", {})
):
# something fishy is going on with this field, working on some Jira
# instances, and some not.
# testing against 5.1.5 and 5.1.4 does not convert (perhaps is no longer included
# in the projectmeta API call, and would normally be converted in the
# above clean method.)
cleaned_data["issuetype"] = {"id": issue_type}
# sometimes the project is missing as well and we need to add it
if "project" not in cleaned_data:
cleaned_data["project"] = {"id": jira_project}
try:
logger.info(
"jira_server.create_issue",
extra={
"organization_id": self.organization_id,
"integration_id": self.model.id,
"jira_project": jira_project,
"cleaned_data": cleaned_data,
},
)
response = client.create_issue(cleaned_data)
except Exception as e:
self.raise_error(e)
issue_key = response.get("key")
if not issue_key:
raise IntegrationError("There was an error creating the issue.")
# Immediately fetch and return the created issue.
return self.get_issue(issue_key)
def _get_matching_jira_server_user_by_external_actor(
self,
client: JiraServerClient,
external_issue_key: str,
user: RpcUser,
integration_id: int,
) -> dict[str, Any] | None:
logging_context = {
"integration_id": integration_id,
"organization_id": self.organization.id,
"issue_key": external_issue_key,
}
external_actors = ExternalActor.objects.filter(
organization_id=self.organization.id,
integration_id=self.model.id,
provider=ExternalProviders.JIRA_SERVER.value,
user_id=user.id,
)
if len(external_actors) > 1:
logger.warning(
"jira_server.user_external_actor.multiple_actors",
extra={
**logging_context,
"user_id": user.id,
},
)
return None
external_actor: ExternalActor | None = external_actors.first()
if external_actor is None:
logger.debug(
"jira_server.user_external_actor.no_actor",
extra={**logging_context, "user_id": user.id},
)
return None
possible_users: list[dict[str, Any]] = client.search_users_for_issue(
external_issue_key, external_actor.external_name
)
for possible_user in possible_users:
name = possible_user.get("name")
if name is None:
continue
if name.lower() == external_actor.external_name.lower():
return possible_user
return None
def _get_matching_jira_server_user_by_email(
self,
external_issue_key: str,
client: JiraServerClient,
user: RpcUser,
integration_id: int,
) -> dict[str, Any] | None:
logging_context = {
"integration_id": integration_id,
"organization_id": self.organization_id,
"issue_key": external_issue_key,
}
logging_context["user_id"] = user.id
logging_context["user_email_count"] = len(user.emails)
jira_user = None
for ue in user.emails:
assert ue, "Expected a valid user email, received falsy value"
possible_users = client.search_users_for_issue(external_issue_key, ue)
for possible_user in possible_users:
# Continue matching on email address, since we can't guarantee
# a clean match.
email = possible_user.get("emailAddress")
if not email:
continue
# match on lowercase email
if email.lower() == ue.lower():
jira_user = possible_user
break
return jira_user
def _get_matching_jira_server_user(
self,
client: JiraServerClient,
external_issue_key: str,
user: RpcUser,
integration_id: int,
) -> dict[str, Any] | None:
logging_context = {
"integration_id": integration_id,
"organization_id": self.organization_id,
"issue_key": external_issue_key,
}
try:
possible_user = self._get_matching_jira_server_user_by_external_actor(
client=client,
external_issue_key=external_issue_key,
user=user,
integration_id=integration_id,
)
if possible_user is not None:
return possible_user
possible_user = self._get_matching_jira_server_user_by_email(
client=client,
external_issue_key=external_issue_key,
user=user,
integration_id=integration_id,
)
return possible_user
except ApiUnauthorized:
logger.info(
"jira.user-search.unauthorized",
extra={
**logging_context,
},
)
except ApiError as e:
logger.warning(
"jira.user-search.request-error",
extra={
**logging_context,
"error": str(e),
},
)
return None
def sync_assignee_outbound(
self,
external_issue: ExternalIssue,
user: RpcUser | None,
assign: bool = True,
**kwargs: Any,
) -> None:
"""
Propagate a sentry issue's assignee to a jira issue's assignee
"""
client = self.get_client()
logging_context = {
"integration_id": external_issue.integration_id,
"organization_id": self.organization_id,
"issue_key": external_issue.key,
}
jira_user = None
if user and assign:
logging_context["user_id"] = user.id
logging_context["user_email_count"] = len(user.emails)
jira_user = self._get_matching_jira_server_user(
external_issue_key=external_issue.key,
client=client,
user=user,
integration_id=external_issue.integration_id,
)
if jira_user is None:
# TODO(jess): do we want to email people about these types of failures?
logger.info(
"jira_server.assignee-not-found",
extra=logging_context,
)
raise IntegrationSyncTargetNotFound("No matching Jira Server user found")
try:
id_field = client.user_id_field()
client.assign_issue(external_issue.key, jira_user and jira_user.get(id_field))
except ApiUnauthorized as e:
logger.info(
"jira_server.user-assignment-unauthorized",
extra={
**logging_context,
},
)
raise IntegrationConfigurationError(
"Insufficient permissions to assign user to Jira Server issue"
) from e
except ApiError as e:
logger.info(
"jira_server.user-assignment-request-error",
extra={
**logging_context,
"error": str(e),
},
)
raise IntegrationError("Failed to assign user to Jira Server issue") from e
def sync_status_outbound(
self, external_issue: ExternalIssue, is_resolved: bool, project_id: int
) -> None:
"""
Propagate a sentry issue's status to a linked issue's status.
"""
client = self.get_client()
jira_issue = client.get_issue(external_issue.key)
jira_project = jira_issue["fields"]["project"]
external_project = integration_service.get_integration_external_project(
organization_id=external_issue.organization_id,
integration_id=external_issue.integration_id,
external_id=jira_project["id"],
)
if not external_project:
return
jira_status = (
external_project.resolved_status if is_resolved else external_project.unresolved_status
)
# don't bother updating if it's already the status we'd change it to
if jira_issue["fields"]["status"]["id"] == jira_status:
return
try:
transitions = client.get_transitions(external_issue.key)
except ApiHostError:
raise IntegrationError("Could not reach host to get transitions.")
try:
transition = [t for t in transitions if t.get("to", {}).get("id") == jira_status][0]
except IndexError:
# TODO(jess): Email for failure
logger.warning(
"jira.status-sync-fail",
extra={
"organization_id": external_issue.organization_id,
"integration_id": external_issue.integration_id,
"issue_key": external_issue.key,
"transitions": transitions,
"jira_status": jira_status,
},
)
return
client.transition_issue(external_issue.key, transition["id"])
def _get_done_statuses(self):
client = self.get_client()
statuses = client.get_valid_statuses()
return {s["id"] for s in statuses if s["statusCategory"]["key"] == "done"}
def get_resolve_sync_action(self, data: Mapping[str, Any]) -> ResolveSyncAction:
done_statuses = self._get_done_statuses()
c_from = data["changelog"]["from"]
c_to = data["changelog"]["to"]
return ResolveSyncAction.from_resolve_unresolve(
should_resolve=c_to in done_statuses and c_from not in done_statuses,
should_unresolve=c_from in done_statuses and c_to not in done_statuses,
)
def after_link_issue(self, external_issue, data=None, **kwargs):
super().after_link_issue(external_issue, **kwargs)
if data:
comment = data.get("comment")
if comment:
self.get_client().create_comment(external_issue.key, comment)
def migrate_issues(self):
migrate_issues.apply_async(
kwargs={
"integration_id": self.model.id,
"organization_id": self.organization_id,
}
)
|
JiraServerIntegration
|
python
|
pyparsing__pyparsing
|
pyparsing/exceptions.py
|
{
"start": 10027,
"end": 10332
}
|
class ____(ParseFatalException):
"""
Just like :class:`ParseFatalException`, but thrown internally
when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates
that parsing is to stop immediately because an unbacktrackable
syntax error has been found.
"""
|
ParseSyntaxException
|
python
|
django__django
|
django/db/models/fields/json.py
|
{
"start": 23551,
"end": 23667
}
|
class ____(
CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IEndsWith
):
pass
|
KeyTransformIEndsWith
|
python
|
numpy__numpy
|
numpy/_core/tests/test_numerictypes.py
|
{
"start": 12939,
"end": 13361
}
|
class ____:
def _bad_call(self):
ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8')
return ary['f0', 'f1']
def test_no_tuple(self):
assert_raises(IndexError, self._bad_call)
def test_return(self):
ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8')
res = ary[['f0', 'f2']].tolist()
assert_(res == [(1, 3), (5, 7)])
|
TestMultipleFields
|
python
|
pytorch__pytorch
|
test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py
|
{
"start": 48026,
"end": 48248
}
|
class ____(RendezvousBackend):
@property
def name(self):
return "dummy_backend"
def get_state(self):
return None
def set_state(self, state, token):
return None
|
DummyRendezvousBackend
|
python
|
pytorch__pytorch
|
test/distributed/elastic/rendezvous/dynamic_rendezvous_test.py
|
{
"start": 1449,
"end": 1814
}
|
class ____:
assertDictEqual: Callable
def assert_state_equal(
self, actual: _RendezvousState, expected: _RendezvousState
) -> None:
self.assertDictEqual(vars(actual), vars(expected))
def assert_state_empty(self, actual: _RendezvousState) -> None:
self.assertDictEqual(vars(actual), vars(_RendezvousState()))
|
CustomAssertMixin
|
python
|
keras-team__keras
|
keras/src/trainers/data_adapters/py_dataset_adapter_test.py
|
{
"start": 1675,
"end": 2466
}
|
class ____(py_dataset_adapter.PyDataset):
def __init__(self, inputs, batch_size=32, **kwargs):
super().__init__(**kwargs)
self.inputs = inputs
self.batch_size = batch_size
@property
def num_batches(self):
return math.ceil(len(self.inputs["x"]) / self.batch_size)
def __getitem__(self, idx):
# Return x, y for batch idx.
low = idx * self.batch_size
# Cap upper bound at array length; the last batch may be smaller
# if the total number of items is not a multiple of batch size.
high = min(low + self.batch_size, len(self.inputs["x"]))
batch_x = self.inputs["x"][low:high]
batch_y = self.inputs["y"][low:high]
batch = {"x": batch_x, "y": batch_y}
return batch
|
DictPyDataset
|
python
|
python-attrs__attrs
|
tests/test_utils.py
|
{
"start": 34,
"end": 440
}
|
class ____:
"""
Tests for the testing helper function `make_class`.
"""
def test_returns_class(self):
"""
Returns a class object.
"""
assert type is simple_class().__class__
def test_returns_distinct_classes(self):
"""
Each call returns a completely new class.
"""
assert simple_class() is not simple_class()
|
TestSimpleClass
|
python
|
pytorch__pytorch
|
test/cpp_extensions/open_registration_extension/torch_openreg/tests/test_misc.py
|
{
"start": 170,
"end": 1350
}
|
class ____(TestCase):
def test_backend_module_name(self):
self.assertEqual(torch._C._get_privateuse1_backend_name(), "openreg")
# backend can be renamed to the same name multiple times
torch.utils.rename_privateuse1_backend("openreg")
with self.assertRaisesRegex(RuntimeError, "has already been set"):
torch.utils.rename_privateuse1_backend("dev")
def test_backend_module_registration(self):
def generate_faked_module():
return types.ModuleType("fake_module")
with self.assertRaisesRegex(RuntimeError, "Expected one of cpu"):
torch._register_device_module("dev", generate_faked_module())
with self.assertRaisesRegex(RuntimeError, "The runtime module of"):
torch._register_device_module("openreg", generate_faked_module())
def test_backend_module_function(self):
with self.assertRaisesRegex(RuntimeError, "Try to call torch.openreg"):
torch.utils.backend_registration._get_custom_mod_func("func_name_")
self.assertTrue(
torch.utils.backend_registration._get_custom_mod_func("device_count")() == 2
)
|
TestBackendModule
|
python
|
apache__airflow
|
airflow-core/src/airflow/api_fastapi/core_api/datamodels/plugins.py
|
{
"start": 2797,
"end": 4094
}
|
class ____(BaseModel):
"""Plugin serializer."""
name: str
macros: list[str]
flask_blueprints: list[str]
fastapi_apps: list[FastAPIAppResponse]
fastapi_root_middlewares: list[FastAPIRootMiddlewareResponse]
external_views: list[ExternalViewResponse] = Field(
description="Aggregate all external views. Both 'external_views' and 'appbuilder_menu_items' are included here."
)
react_apps: list[ReactAppResponse]
appbuilder_views: list[AppBuilderViewResponse]
appbuilder_menu_items: list[AppBuilderMenuItemResponse] = Field(
deprecated="Kept for backward compatibility, use `external_views` instead.",
)
global_operator_extra_links: list[str]
operator_extra_links: list[str]
source: Annotated[str, BeforeValidator(coerce_to_string)]
listeners: list[str]
timetables: list[str]
@field_validator("source", mode="before")
@classmethod
def convert_source(cls, data: Any) -> Any:
if isinstance(data, AirflowPluginSource):
return str(data)
return data
@model_validator(mode="before")
@classmethod
def convert_external_views(cls, data: Any) -> Any:
data["external_views"] = [*data["external_views"], *data["appbuilder_menu_items"]]
return data
|
PluginResponse
|
python
|
getsentry__sentry
|
src/sentry/newsletter/dummy.py
|
{
"start": 2021,
"end": 4190
}
|
class ____(Newsletter):
"""
The ``DummyNewsletter`` implementation is primarily used for test cases. It uses a in-memory
store for tracking subscriptions, which means its not suitable for any real production use-case.
"""
def __init__(self, enabled: bool = False) -> None:
self._subscriptions: dict[User, dict[int, NewsletterSubscription]] = defaultdict(dict)
self._enabled = enabled
@contextlib.contextmanager
def enable(self) -> Generator[None]:
self._enabled = True
try:
yield
finally:
self._enabled = False
def clear(self):
self._subscriptions = defaultdict(dict)
def is_enabled(self):
return self._enabled
def get_subscriptions(self, user: User):
return {"subscriptions": list((self._subscriptions.get(user) or {}).values())}
def update_subscription(
self,
user: User,
list_id: int | None = None,
create: bool | None = False,
**kwargs: Any,
) -> dict[int, NewsletterSubscription]:
if not list_id:
list_id = self.get_default_list_id()
if create:
self._subscriptions[user].setdefault(
list_id, NewsletterSubscription(user, list_id, subscribed=True)
)
self._subscriptions[user][list_id].update(**kwargs)
return self._subscriptions[user]
def update_subscriptions(
self,
user: User,
list_ids: Sequence[int] | None = None,
create: bool | None = False,
**kwargs: Any,
):
if not list_ids:
list_ids = self.get_default_list_ids()
for list_id in list_ids:
self.update_subscription(user, list_id, create, **kwargs)
return self._subscriptions[user]
def optout_email(self, email: str, **kwargs: Any) -> None:
unsubscribe_date = timezone.now()
for by_list in self._subscriptions.values():
for subscription in by_list.values():
if subscription.email == email:
subscription.update(subscribed=False, unsubscribe_date=unsubscribe_date)
|
DummyNewsletter
|
python
|
sphinx-doc__sphinx
|
tests/roots/test-ext-autodoc/target/private.py
|
{
"start": 285,
"end": 557
}
|
class ____:
#: A public class attribute whose name starts with an underscore.
#:
#: :meta public:
_public_attribute = 47
#: A private class attribute whose name does not start with an underscore.
#:
#: :meta private:
private_attribute = 11
|
Foo
|
python
|
langchain-ai__langchain
|
libs/text-splitters/langchain_text_splitters/markdown.py
|
{
"start": 11537,
"end": 11649
}
|
class ____(TypedDict):
"""Header type as typed dict."""
level: int
name: str
data: str
|
HeaderType
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/dataplex.py
|
{
"start": 177706,
"end": 183979
}
|
class ____(DataplexCatalogBaseOperator):
"""
Update an Entry resource.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataplexCatalogUpdateEntryOperator`
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param location: Required. The ID of the Google Cloud region that the task belongs to.
:param entry_id: Required. Entry identifier. It has to be unique within an Entry Group.
Entries corresponding to Google Cloud resources use an Entry ID format based on `full resource
names <https://cloud.google.com/apis/design/resource_names#full_resource_name>`__.
The format is a full resource name of the resource without the prefix double slashes in the API
service name part of the full resource name. This allows retrieval of entries using their
associated resource name.
For example, if the full resource name of a resource is
``//library.googleapis.com/shelves/shelf1/books/book2``, then the suggested entry_id is
``library.googleapis.com/shelves/shelf1/books/book2``.
It is also suggested to follow the same convention for entries corresponding to resources from
providers or systems other than Google Cloud.
The maximum size of the field is 4000 characters.
:param entry_group_id: Required. EntryGroup resource name to which created Entry belongs to.
:param entry_configuration: Required. The updated configuration body of the Entry.
:param allow_missing: Optional. If set to true and entry doesn't exist, the service will create it.
:param delete_missing_aspects: Optional. If set to true and the aspect_keys specify aspect
ranges, the service deletes any existing aspects from that range that were not provided
in the request.
:param aspect_keys: Optional. The map keys of the Aspects which the service should modify.
It supports the following syntax:
- ``<aspect_type_reference>`` - matches an aspect of the given type and empty path.
- ``<aspect_type_reference>@path`` - matches an aspect of the given type and specified path.
For example, to attach an aspect to a field that is specified by the ``schema``
aspect, the path should have the format ``Schema.<field_name>``.
- ``<aspect_type_reference>@*`` - matches aspects of the given type for all paths.
- ``*@path`` - matches aspects of all types on the given path.
The service will not remove existing aspects matching the syntax unless ``delete_missing_aspects``
is set to true.
If this field is left empty, the service treats it as specifying exactly those Aspects present
in the request.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests
will not be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
:param gcp_conn_id: Optional. The connection ID to use when fetching connection info.
:param impersonation_chain: Optional. Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple(
{"entry_id", "entry_group_id", "entry_configuration"}
| set(DataplexCatalogBaseOperator.template_fields)
)
operator_extra_links = (DataplexCatalogEntryLink(),)
def __init__(
self,
entry_id: str,
entry_group_id: str,
entry_configuration: dict | Entry,
allow_missing: bool | None = False,
delete_missing_aspects: bool | None = False,
aspect_keys: MutableSequence[str] | None = None,
update_mask: list[str] | FieldMask | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.entry_id = entry_id
self.entry_group_id = entry_group_id
self.entry_configuration = entry_configuration
self.update_mask = update_mask
self.allow_missing = allow_missing
self.delete_missing_aspects = delete_missing_aspects
self.aspect_keys = aspect_keys
@property
def extra_links_params(self) -> dict[str, Any]:
return {
**super().extra_links_params,
"entry_id": self.entry_id,
"entry_group_id": self.entry_group_id,
}
def execute(self, context: Context):
DataplexCatalogEntryLink.persist(context=context)
try:
entry = self.hook.update_entry(
location=self.location,
project_id=self.project_id,
entry_id=self.entry_id,
entry_group_id=self.entry_group_id,
entry_configuration=self.entry_configuration,
update_mask=self.update_mask,
allow_missing=self.allow_missing,
delete_missing_aspects=self.delete_missing_aspects,
aspect_keys=self.aspect_keys,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except NotFound as ex:
self.log.info("Specified Entry was not found.")
raise AirflowException(ex)
except Exception as exc:
raise AirflowException(exc)
else:
result = Entry.to_dict(entry)
self.log.info("Entry %s was successfully updated.", self.entry_id)
return result
|
DataplexCatalogUpdateEntryOperator
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/objects/fn_node_mapping.py
|
{
"start": 262,
"end": 1933
}
|
class ____(BaseObjectNodeMapping[Any]):
"""Fn node mapping."""
def __init__(
self,
from_node_fn: Callable[[BaseNode], Any],
to_node_fn: Callable[[Any], BaseNode],
) -> None:
self._to_node_fn = to_node_fn
self._from_node_fn = from_node_fn
@classmethod
def from_objects( # type: ignore
cls,
objs: Sequence[Any],
from_node_fn: Callable[[BaseNode], Any],
to_node_fn: Callable[[Any], BaseNode],
*args: Any,
**kwargs: Any,
) -> "BaseObjectNodeMapping":
"""Initialize node mapping."""
return cls(from_node_fn, to_node_fn)
def _add_object(self, obj: Any) -> None:
"""Add object. NOTE: unused."""
def to_node(self, obj: Any) -> BaseNode:
"""To node."""
return self._to_node_fn(obj)
def _from_node(self, node: BaseNode) -> Any:
"""From node."""
return self._from_node_fn(node)
@property
def obj_node_mapping(self) -> Dict[int, Any]:
"""The mapping data structure between node and object."""
raise NotImplementedError("FnNodeMapping does not support obj_node_mapping")
def persist(
self, persist_dir: str = ..., obj_node_mapping_fname: str = ...
) -> None:
"""Persist objs."""
raise NotImplementedError("FnNodeMapping does not support persist method.")
@classmethod
def from_persist_dir(
cls,
persist_dir: str = DEFAULT_PERSIST_DIR,
obj_node_mapping_fname: str = DEFAULT_PERSIST_FNAME,
) -> "FnNodeMapping":
raise NotImplementedError("FnNodeMapping does not support persist method.")
|
FnNodeMapping
|
python
|
kamyu104__LeetCode-Solutions
|
Python/reducing-dishes.py
|
{
"start": 33,
"end": 402
}
|
class ____(object):
def maxSatisfaction(self, satisfaction):
"""
:type satisfaction: List[int]
:rtype: int
"""
satisfaction.sort(reverse=True)
result, curr = 0, 0
for x in satisfaction:
curr += x
if curr <= 0:
break
result += curr
return result
|
Solution
|
python
|
pallets__click
|
src/click/core.py
|
{
"start": 57465,
"end": 57721
}
|
class ____(type):
def __subclasscheck__(cls, subclass: type) -> bool:
return issubclass(subclass, cls.__bases__[0])
def __instancecheck__(cls, instance: t.Any) -> bool:
return isinstance(instance, cls.__bases__[0])
|
_FakeSubclassCheck
|
python
|
pytorch__pytorch
|
torch/_inductor/fx_passes/memory_estimator.py
|
{
"start": 390,
"end": 768
}
|
class ____:
storage: torch.UntypedStorage
device: torch.device
def __hash__(self) -> int:
return self.storage._cdata
def __eq__(self, other: object) -> bool:
if not isinstance(other, StorageKey):
return False
return (
self.storage._cdata == other.storage._cdata and self.device == other.device
)
|
StorageKey
|
python
|
conda__conda
|
conda/plugins/types.py
|
{
"start": 8394,
"end": 8832
}
|
class ____(CondaPlugin):
"""
Return type to use when the defining the conda auth handlers hook.
:param name: Name (e.g., ``basic-auth``). This name should be unique
and only one may be registered at a time.
:param handler: Type that will be used as the authentication handler
during network requests.
"""
name: str
handler: type[ChannelAuthBase]
@dataclass
|
CondaAuthHandler
|
python
|
pytorch__pytorch
|
torch/utils/mobile_optimizer.py
|
{
"start": 237,
"end": 6414
}
|
class ____(Enum):
BUNDLED_INPUT = 1
REQUIRES_GRAD = 2
DROPOUT = 3
BATCHNORM = 4
def optimize_for_mobile(
script_module: torch.jit.ScriptModule,
optimization_blocklist: set[MobileOptimizerType] | None = None,
preserved_methods: list[AnyStr] | None = None,
backend: str = 'CPU') -> torch.jit.RecursiveScriptModule:
"""
Optimize a torch script module for mobile deployment.
Args:
script_module: An instance of torch script module with type of ScriptModule.
optimization_blocklist: A set with type of MobileOptimizerType. When set is not passed,
optimization method will run all the optimizer pass; otherwise, optimizer
method will run the optimization pass that is not included inside optimization_blocklist.
preserved_methods: A list of methods that needed to be preserved when freeze_module pass is invoked
backend: Device type to use for running the result model ('CPU'(default), 'Vulkan' or 'Metal').
Returns:
A new optimized torch script module
"""
if not isinstance(script_module, torch.jit.ScriptModule):
raise TypeError(
f'Got {type(script_module)}, but ScriptModule is expected.')
if optimization_blocklist is None:
optimization_blocklist = set()
if preserved_methods is None:
preserved_methods = []
# Convert potential byte arrays into strings (if there is any) to pass type checking
# Here we use a new name as assigning it back to preserved_methods will invoke
# mypy errors (i.e. List[AnyStr] = List[str])
preserved_methods_str: list[str] = [str(method) for method in preserved_methods]
bundled_inputs_attributes = _get_bundled_inputs_preserved_attributes(script_module, preserved_methods_str)
if all(hasattr(script_module, method) for method in bundled_inputs_attributes):
preserved_methods_str = list(set(preserved_methods_str + bundled_inputs_attributes))
non_exist_methods = [method for method in preserved_methods_str if not hasattr(script_module, method)]
if non_exist_methods:
raise AttributeError(
f"The following methods to preserve do not exist in script_module: {', '.join(non_exist_methods)}")
backend = backend.lower()
if backend == 'cpu':
optimized_cpp_module = torch._C._jit_pass_optimize_for_mobile(
script_module._c,
optimization_blocklist,
preserved_methods_str)
elif backend == 'vulkan':
optimized_cpp_module = torch._C._jit_pass_vulkan_optimize_for_mobile(
script_module._c,
optimization_blocklist,
preserved_methods_str)
elif backend == 'metal':
optimized_cpp_module = torch._C._jit_pass_metal_optimize_for_mobile(script_module._c, preserved_methods_str)
else:
raise TypeError("Unknown backend, must be one of 'CPU', 'Vulkan' or 'Metal'")
return torch.jit._recursive.wrap_cpp_module(optimized_cpp_module)
def generate_mobile_module_lints(script_module: torch.jit.ScriptModule):
"""
Generate a list of lints for a given torch script module.
Args:
script_module: An instance of torch script module with type of ScriptModule.
Returns:
lint_map: A list of dictionary that contains modules lints
"""
if not isinstance(script_module, torch.jit.ScriptModule):
raise TypeError(
f'Got {type(script_module)}, but ScriptModule is expected.')
lint_list = []
if not hasattr(script_module, "_generate_bundled_inputs_for_forward"):
lint_list.append({"name": LintCode.BUNDLED_INPUT.name, "message": "No bundled input for forward, please add bundled inputs "
"before saving the module using torch.utils.bundled_inputs.augment_model_with_bundled_inputs."})
for name, param in script_module.named_parameters():
if param.requires_grad:
lint_list.append({"name": LintCode.REQUIRES_GRAD.name, "message": f"Param {name} requires grad, "
"please set torch.no_grad() to reduce memory usage and improve computation speed during "
"inference phase."})
op_names = torch.jit.export_opnames(script_module)
for op_name in op_names:
if "dropout" in op_name:
lint_list.append({"name": LintCode.DROPOUT.name,
"message": f"Operator {op_name} exists, remember to call eval() before "
"saving the module.and call torch.utils.mobile_optimizer.optimize_for_mobile to drop dropout "
"operator."})
if "batch_norm" in op_name:
lint_list.append({"name": LintCode.BATCHNORM.name,
"message": f"Operator {op_name} exists, remember to call eval() before "
"saving the module and call torch.utils.mobile_optimizer.optimize_for_mobile to drop batch_norm "
"operator."})
return lint_list
def _get_bundled_inputs_preserved_attributes(script_module: torch.jit.ScriptModule, preserved_methods: list[str]) -> list[str]:
bundled_inputs_attributes = []
# Has bundled inputs for forward
if hasattr(script_module, 'get_all_bundled_inputs'):
bundled_inputs_attributes.append('get_all_bundled_inputs')
bundled_inputs_attributes.append('get_num_bundled_inputs')
# Bundled inputs in module after the change that introduced bundled inputs for multiple functions
if hasattr(script_module, 'get_bundled_inputs_functions_and_info'):
bundled_inputs_attributes.append('get_bundled_inputs_functions_and_info')
all_info = script_module.get_bundled_inputs_functions_and_info()
for function_name in all_info:
if function_name not in preserved_methods:
bundled_inputs_attributes.append(function_name)
bundled_inputs_attributes.append("get_all_bundled_inputs_for_" + function_name)
bundled_inputs_attributes.append("_bundled_inputs_deflated_" + function_name)
return bundled_inputs_attributes
|
LintCode
|
python
|
getsentry__sentry
|
src/sentry/integrations/jira/utils/create_issue_schema_transformers.py
|
{
"start": 297,
"end": 4876
}
|
class ____(Exception):
pass
def parse_number_field(num_str: Any) -> int | float:
try:
if isinstance(num_str, str) and "." in num_str:
return float(num_str)
return int(num_str)
except ValueError:
raise JiraSchemaParseError(f"Invalid number value provided for field: '{num_str}'")
TransformerType = Mapping[str, Callable[[Any], Any]]
T = TypeVar("T")
def identity_transformer(input_val: T) -> T:
return input_val
def id_obj_transformer(input_val: Any) -> dict[str, Any]:
return {"id": input_val}
def get_type_transformer_mappings(user_id_field: str) -> TransformerType:
transformers = {
JiraSchemaTypes.user.value: lambda x: {user_id_field: x},
JiraSchemaTypes.issue_type.value: id_obj_transformer,
JiraSchemaTypes.option.value: lambda x: {"value": x},
JiraSchemaTypes.issue_link.value: lambda x: {"key": x},
JiraSchemaTypes.project.value: id_obj_transformer,
JiraSchemaTypes.number.value: parse_number_field,
JiraSchemaTypes.priority.value: id_obj_transformer,
JiraSchemaTypes.version.value: id_obj_transformer,
JiraSchemaTypes.component: id_obj_transformer,
}
return transformers
def get_custom_field_transformer_mappings() -> TransformerType:
transformers = {
JIRA_CUSTOM_FIELD_TYPES["tempo_account"]: parse_number_field,
JIRA_CUSTOM_FIELD_TYPES["sprint"]: parse_number_field,
JIRA_CUSTOM_FIELD_TYPES["rank"]: id_obj_transformer,
}
return transformers
def get_transformer_for_field(
type_transformers: TransformerType, custom_transformers: TransformerType, jira_field: JiraField
) -> Callable[[Any], Any]:
transformer = None
if jira_field.is_custom_field():
assert jira_field.schema.custom
transformer = custom_transformers.get(jira_field.schema.custom)
if not transformer:
field_type = jira_field.get_field_type()
if field_type:
transformer = type_transformers.get(field_type)
if not transformer:
transformer = identity_transformer
return transformer
def transform_fields(
user_id_field: str, jira_fields: Iterable[JiraField], **data
) -> Mapping[str, Any]:
transformed_data = {}
# Special handling for fields that don't map cleanly to the transformer logic
# Also, we need to truncate the title field to prevent Jira from erroring
# when it's too long.
title = data.get("title")
data["summary"] = title[:255] if title else None
if labels := data.get("labels"):
data["labels"] = [label.strip() for label in labels.split(",") if label.strip()]
type_transformers = get_type_transformer_mappings(user_id_field)
custom_field_transformers = get_custom_field_transformer_mappings()
lowercased_data = {k.lower(): v for k, v in data.items()}
for field in jira_fields:
field_data = lowercased_data.get(field.key.lower())
# Skip any values that indicate no value should be provided.
# We have some older alert templates with "" values, which will raise
# if we don't skip them.
if field_data is None or field_data == "":
continue
field_transformer = get_transformer_for_field(
type_transformers, custom_field_transformers, field
)
try:
# Handling for array types and their nested subtypes.
# We have to skip this handling for `sprint` custom fields, as they
# are the only `array` type that expects a number, not a list.
if (
field.schema.schema_type.lower() == JiraSchemaTypes.array
and field.schema.custom != JIRA_CUSTOM_FIELD_TYPES["sprint"]
):
transformed_value = []
# Occasionally, our UI passes a string instead of a list, so we
# have to just wrap it and hope it's in the correct format.
if not isinstance(field_data, list):
field_data = [field_data]
# Bulk transform the individual data fields
for val in field_data:
transformed_value.append(field_transformer(val))
else:
transformed_value = field_transformer(field_data)
except JiraSchemaParseError as e:
raise IntegrationFormError(field_errors={field.name: str(e)}) from e
if transformed_value is not None:
transformed_data[field.key] = transformed_value
return transformed_data
|
JiraSchemaParseError
|
python
|
bokeh__bokeh
|
src/bokeh/core/query.py
|
{
"start": 7642,
"end": 7963
}
|
class ____(_Operator):
''' Predicate to test if property values are less than some value.
Construct and ``LT`` predicate as a dict with ``LT`` as the key,
and a value to compare against.
.. code-block:: python
# matches any models with .size < 10
dict(size={ LT: 10 })
'''
pass
|
LT
|
python
|
kamyu104__LeetCode-Solutions
|
Python/k-divisible-elements-subarrays.py
|
{
"start": 804,
"end": 1720
}
|
class ____(object):
def countDistinct(self, nums, k, p):
"""
:type nums: List[int]
:type k: int
:type p: int
:rtype: int
"""
MOD, P = 10**9+7, 113
def check(nums, lookup, l, i):
return all(any(nums[i+k] != nums[j+k] for k in xrange(l)) for j in lookup)
result = 0
cnt, h = [0]*len(nums), [0]*len(nums)
for l in xrange(1, len(nums)+1):
lookup = collections.defaultdict(list)
for i in xrange(len(nums)-l+1):
cnt[i] += (nums[i+l-1]%p == 0)
if cnt[i] > k:
continue
h[i] = (h[i]*P+nums[i+l-1])%MOD
if not check(nums, lookup[h[i]], l, i):
continue
lookup[h[i]].append(i)
result += 1
return result
# Time: O(n^2)
# Space: O(n)
# rolling hash
|
Solution2
|
python
|
jina-ai__jina
|
tests/integration/streaming/test_streaming.py
|
{
"start": 1520,
"end": 3999
}
|
class ____(Executor):
@requests(on='/hello')
async def task(self, doc: Document, **kwargs):
for i in range(5):
yield Document(text=f'{doc.text} {i}')
await asyncio.sleep(0.5)
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('include_gateway', [False, True])
@pytest.mark.parametrize('reuse_session', [False, True])
async def test_streaming_delay(protocol, include_gateway, reuse_session):
if reuse_session and protocol != 'http':
return
from jina import Deployment
port = random_port()
with Deployment(
uses=WaitStreamExecutor,
timeout_ready=-1,
protocol=protocol,
port=port,
include_gateway=include_gateway,
):
client = Client(port=port, protocol=protocol, asyncio=True, reuse_session=reuse_session)
i = 0
start_time = time.time()
async for doc in client.stream_doc(
on='/hello', inputs=Document(text='hello world')
):
assert doc.text == f'hello world {i}'
i += 1
# 0.5 seconds between each request + 0.5 seconds tolerance interval
assert time.time() - start_time < (0.5 * i) + 0.5
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['grpc'])
async def test_streaming_client_non_gen_endpoint(protocol):
from jina import Deployment
port = random_port()
with Deployment(
uses=MyExecutor,
timeout_ready=-1,
protocol=protocol,
port=port,
include_gateway=False,
):
client = Client(port=port, protocol=protocol, asyncio=True)
i = 0
with pytest.raises(BadServer):
async for _ in client.stream_doc(
on='/world', inputs=Document(text='hello world')
):
pass
def test_invalid_executor():
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor3(Executor):
@requests(on='/invalid')
async def invalid(self, docs: DocumentArray, **kwargs):
yield docs[0]
assert type(exc_info.value.__cause__) is AssertionError
with pytest.raises(RuntimeError) as exc_info:
class InvalidExecutor4(Executor):
@requests(on='/invalid')
def invalid(self, docs: DocumentArray, **kwargs):
yield docs[0]
assert type(exc_info.value.__cause__) is AssertionError
|
WaitStreamExecutor
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeNarrowingTypeEquals1.py
|
{
"start": 1210,
"end": 1613
}
|
class ____:
pass
def func6(val: AFinal | BFinal) -> None:
if type(val) == AFinal:
reveal_type(val, expected_text="AFinal")
else:
reveal_type(val, expected_text="BFinal")
def func7(val: Any):
if type(val) == int:
reveal_type(val, expected_text="int")
else:
reveal_type(val, expected_text="Any")
reveal_type(val, expected_text="int | Any")
|
BFinal
|
python
|
getsentry__sentry
|
fixtures/page_objects/base.py
|
{
"start": 100,
"end": 409
}
|
class ____:
"""Base class for PageObjects"""
def __init__(self, browser: Browser):
self.browser = browser
@property
def driver(self):
return self.browser.driver
def wait_until_loaded(self):
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
|
BasePage
|
python
|
sympy__sympy
|
sympy/functions/special/bessel.py
|
{
"start": 30139,
"end": 32635
}
|
class ____(SphericalBesselBase):
r"""
Spherical Bessel function of the first kind.
Explanation
===========
This function is a solution to the spherical Bessel equation
.. math ::
z^2 \frac{\mathrm{d}^2 w}{\mathrm{d}z^2}
+ 2z \frac{\mathrm{d}w}{\mathrm{d}z} + (z^2 - \nu(\nu + 1)) w = 0.
It can be defined as
.. math ::
j_\nu(z) = \sqrt{\frac{\pi}{2z}} J_{\nu + \frac{1}{2}}(z),
where $J_\nu(z)$ is the Bessel function of the first kind.
The spherical Bessel functions of integral order are
calculated using the formula:
.. math:: j_n(z) = f_n(z) \sin{z} + (-1)^{n+1} f_{-n-1}(z) \cos{z},
where the coefficients $f_n(z)$ are available as
:func:`sympy.polys.orthopolys.spherical_bessel_fn`.
Examples
========
>>> from sympy import Symbol, jn, sin, cos, expand_func, besselj, bessely
>>> z = Symbol("z")
>>> nu = Symbol("nu", integer=True)
>>> print(expand_func(jn(0, z)))
sin(z)/z
>>> expand_func(jn(1, z)) == sin(z)/z**2 - cos(z)/z
True
>>> expand_func(jn(3, z))
(-6/z**2 + 15/z**4)*sin(z) + (1/z - 15/z**3)*cos(z)
>>> jn(nu, z).rewrite(besselj)
sqrt(2)*sqrt(pi)*sqrt(1/z)*besselj(nu + 1/2, z)/2
>>> jn(nu, z).rewrite(bessely)
(-1)**nu*sqrt(2)*sqrt(pi)*sqrt(1/z)*bessely(-nu - 1/2, z)/2
>>> jn(2, 5.2+0.3j).evalf(20)
0.099419756723640344491 - 0.054525080242173562897*I
See Also
========
besselj, bessely, besselk, yn
References
==========
.. [1] https://dlmf.nist.gov/10.47
"""
@classmethod
def eval(cls, nu, z):
if z.is_zero:
if nu.is_zero:
return S.One
elif nu.is_integer:
if nu.is_positive:
return S.Zero
else:
return S.ComplexInfinity
if z in (S.NegativeInfinity, S.Infinity):
return S.Zero
def _eval_rewrite_as_besselj(self, nu, z, **kwargs):
return sqrt(pi/(2*z)) * besselj(nu + S.Half, z)
def _eval_rewrite_as_bessely(self, nu, z, **kwargs):
return S.NegativeOne**nu * sqrt(pi/(2*z)) * bessely(-nu - S.Half, z)
def _eval_rewrite_as_yn(self, nu, z, **kwargs):
return S.NegativeOne**(nu) * yn(-nu - 1, z)
def _expand(self, **hints):
return _jn(self.order, self.argument)
def _eval_evalf(self, prec):
if self.order.is_Integer:
return self.rewrite(besselj)._eval_evalf(prec)
|
jn
|
python
|
getsentry__sentry
|
src/sentry/notifications/models/notificationaction.py
|
{
"start": 861,
"end": 1286
}
|
class ____(IntEnum):
@classmethod
def as_choices(cls) -> tuple[tuple[int, str], ...]:
raise NotImplementedError
@classmethod
def get_name(cls, value: int) -> str | None:
return dict(cls.as_choices()).get(value)
@classmethod
def get_value(cls, name: str) -> int | None:
invert_choices = {v: k for k, v in cls.as_choices()}
return invert_choices.get(name)
|
FlexibleIntEnum
|
python
|
openai__openai-python
|
src/openai/types/realtime/realtime_truncation_retention_ratio.py
|
{
"start": 252,
"end": 691
}
|
class ____(BaseModel):
post_instructions: Optional[int] = None
"""
Maximum tokens allowed in the conversation after instructions (which including
tool definitions). For example, setting this to 5,000 would mean that truncation
would occur when the conversation exceeds 5,000 tokens after instructions. This
cannot be higher than the model's context window size minus the maximum output
tokens.
"""
|
TokenLimits
|
python
|
streamlit__streamlit
|
lib/streamlit/testing/v1/element_tree.py
|
{
"start": 53014,
"end": 53596
}
|
class ____(Block):
"""Base class for the sidebar and main body containers."""
def __init__(
self,
proto: BlockProto | None,
root: ElementTree,
type: str | None = None,
) -> None:
self.children = {}
self.proto = proto
if type:
self.type = type
elif proto and proto.WhichOneof("type"):
ty = proto.WhichOneof("type")
assert ty is not None
self.type = ty
else:
self.type = "unknown"
self.root = root
@dataclass(repr=False)
|
SpecialBlock
|
python
|
kamyu104__LeetCode-Solutions
|
Python/matrix-block-sum.py
|
{
"start": 37,
"end": 773
}
|
class ____(object):
def matrixBlockSum(self, mat, K):
"""
:type mat: List[List[int]]
:type K: int
:rtype: List[List[int]]
"""
m, n = len(mat), len(mat[0])
accu = [[0 for _ in xrange(n+1)] for _ in xrange(m+1)]
for i in xrange(m):
for j in xrange(n):
accu[i+1][j+1] = accu[i+1][j]+accu[i][j+1]-accu[i][j]+mat[i][j]
result = [[0 for _ in xrange(n)] for _ in xrange(m)]
for i in xrange(m):
for j in xrange(n):
r1, c1, r2, c2 = max(i-K, 0), max(j-K, 0), min(i+K+1, m), min(j+K+1, n)
result[i][j] = accu[r2][c2]-accu[r1][c2]-accu[r2][c1]+accu[r1][c1]
return result
|
Solution
|
python
|
agronholm__apscheduler
|
src/apscheduler/executors/thread.py
|
{
"start": 324,
"end": 900
}
|
class ____(JobExecutor):
"""
Executes functions in a thread pool.
:param max_workers: the maximum number of worker threads to keep
"""
max_workers: int = 40
_limiter: CapacityLimiter = attrs.field(init=False)
async def start(self, exit_stack: AsyncExitStack) -> None:
self._limiter = CapacityLimiter(self.max_workers)
async def run_job(self, func: Callable[..., Any], job: Job) -> Any:
wrapped = partial(func, *job.args, **job.kwargs)
return await to_thread.run_sync(wrapped, limiter=self._limiter)
|
ThreadPoolJobExecutor
|
python
|
google__pytype
|
pytype/tools/runner.py
|
{
"start": 72,
"end": 931
}
|
class ____:
"""Convenience wrapper around subprocess.
Use as:
ret, out, err = BinaryRun([exe, arg, ...]).communicate()
"""
def __init__(self, args, dry_run=False):
self.args = args
self.results = None
if dry_run:
self.results = (0, b"", b"")
else:
self.proc = subprocess.Popen( # pylint: disable=consider-using-with
self.args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
def communicate(self):
if self.results:
# We are running in dry-run mode.
return self.results
stdout, stderr = self.proc.communicate()
self.results = self.proc.returncode, stdout, stderr
return self.results
def can_run(exe, *args):
"""Check if running exe with args works."""
try:
BinaryRun([exe] + list(args)).communicate()
return True
except OSError:
return False
|
BinaryRun
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/abstractClass10.py
|
{
"start": 616,
"end": 1116
}
|
class ____(A):
@staticmethod
def method1() -> None:
# This should generate an error.
return super(B).method1()
@staticmethod
def method2() -> None:
return super(B).method2()
@classmethod
def method3(cls) -> None:
# This should generate an error.
return super().method3()
@classmethod
def method4(cls) -> None:
return super().method4()
B.method1()
B.method2()
def func1(a: type[A]):
a.method1()
a.method3()
|
B
|
python
|
numba__numba
|
numba/tests/test_unsafe_intrinsics.py
|
{
"start": 4723,
"end": 5345
}
|
class ____(TestCase):
def test_dump_refcount(self):
@njit
def use_dump_refcount():
a = np.ones(10)
b = (a, a)
dump_refcount(a)
dump_refcount(b)
# Capture output to sys.stdout
with captured_stdout() as stream:
use_dump_refcount()
output = stream.getvalue()
# Check that it printed
pat = "dump refct of {}"
aryty = types.float64[::1]
tupty = types.Tuple.from_types([aryty] * 2)
self.assertIn(pat.format(aryty), output)
self.assertIn(pat.format(tupty), output)
|
TestRefCount
|
python
|
django-import-export__django-import-export
|
tests/core/tests/admin_integration/test_export.py
|
{
"start": 16533,
"end": 19962
}
|
class ____(TestCase):
mock_request = MagicMock(spec=HttpRequest)
mock_request.POST = {"django-import-export-format": 0, "bookresource_id": True}
class TestMixin(ExportMixin):
model = Book
def __init__(self, test_str=None):
self.test_str = test_str
def get_data_for_export(self, request, queryset, **kwargs):
dataset = Dataset(headers=["id", "name"])
dataset.append([1, self.test_str])
return dataset
def get_export_queryset(self, request):
return []
def get_export_filename(self, request, queryset, file_format):
return "f"
def setUp(self):
self.file_format = formats.base_formats.CSV()
self.export_mixin = self.TestMixin(test_str="teststr")
def test_to_encoding_not_set_default_encoding_is_utf8(self):
self.export_mixin = self.TestMixin(test_str="teststr")
data = self.export_mixin.get_export_data(
self.file_format, self.mock_request, []
)
csv_dataset = tablib.import_set(data)
self.assertEqual("teststr", csv_dataset.dict[0]["name"])
def test_to_encoding_set(self):
self.export_mixin = self.TestMixin(test_str="ハローワールド")
data = self.export_mixin.get_export_data(
self.file_format, self.mock_request, [], encoding="shift-jis"
)
encoding = chardet.detect(bytes(data))["encoding"]
self.assertEqual("SHIFT_JIS", encoding)
def test_to_encoding_set_incorrect(self):
self.export_mixin = self.TestMixin()
with self.assertRaises(LookupError):
self.export_mixin.get_export_data(
self.file_format,
self.mock_request,
[],
encoding="bad-encoding",
)
@ignore_utcnow_deprecation_warning
def test_to_encoding_not_set_for_binary_file(self):
self.export_mixin = self.TestMixin(test_str="teststr")
self.file_format = formats.base_formats.XLSX()
data = self.export_mixin.get_export_data(
self.file_format,
self.mock_request,
[],
)
binary_dataset = tablib.import_set(data)
self.assertEqual("teststr", binary_dataset.dict[0]["name"])
def test_export_action_to_encoding(self):
self.export_mixin.to_encoding = "utf-8"
with mock.patch(
"import_export.admin.ExportMixin.get_export_data"
) as mock_get_export_data:
self.export_mixin.export_action(self.mock_request)
encoding_kwarg = mock_get_export_data.call_args_list[0][1]["encoding"]
self.assertEqual("utf-8", encoding_kwarg)
@override_settings(IMPORT_EXPORT_SKIP_ADMIN_ACTION_EXPORT_UI=True)
def test_export_admin_action_to_encoding(self):
class TestExportActionMixin(ExportActionMixin):
def get_export_filename(self, request, queryset, file_format):
return "f"
self.export_mixin = TestExportActionMixin()
self.export_mixin.to_encoding = "utf-8"
with mock.patch(
"import_export.admin.ExportMixin.get_export_data"
) as mock_get_export_data:
self.export_mixin.export_admin_action(self.mock_request, [])
encoding_kwarg = mock_get_export_data.call_args_list[0][1]["encoding"]
self.assertEqual("utf-8", encoding_kwarg)
|
TestExportEncoding
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/backends/backend_tkcairo.py
|
{
"start": 164,
"end": 771
}
|
class ____(FigureCanvasCairo, FigureCanvasTk):
def draw(self):
width = int(self.figure.bbox.width)
height = int(self.figure.bbox.height)
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
self._renderer.set_context(cairo.Context(surface))
self._renderer.dpi = self.figure.dpi
self.figure.draw(self._renderer)
buf = np.reshape(surface.get_data(), (height, width, 4))
_backend_tk.blit(
self._tkphoto, buf,
(2, 1, 0, 3) if sys.byteorder == "little" else (1, 2, 3, 0))
@_BackendTk.export
|
FigureCanvasTkCairo
|
python
|
spyder-ide__spyder
|
spyder/plugins/maininterpreter/container.py
|
{
"start": 881,
"end": 12326
}
|
class ____(PluginMainContainer):
sig_interpreter_changed = Signal(str)
"""
Signal to report that the main interpreter has changed.
Parameters
----------
path: str
Path to the new interpreter.
"""
sig_environments_updated = Signal(dict)
"""
This signal is emitted when the conda, pyenv or custom environments tracked
by this plugin were updated.
Parameters
----------
envs: dict
Environments dictionary in the format given by
:py:meth:`spyder.utils.envs.get_list_envs`.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._interpreter = self.get_main_interpreter()
self._startup = True
self._current_envs = None
self._lock = QMutex()
self.path_to_env = {}
self.envs = {}
self.internal_interpreter = sys.executable
if os.name == 'nt' and is_conda_based_app():
# Be sure to use 'python' executable instead of 'pythonw' since
# no output is generated with 'pythonw'.
self.internal_interpreter = self.internal_interpreter.replace(
"pythonw.exe", "python.exe"
).lower()
# Worker to compute envs info in a thread
self._worker_manager = WorkerManager(self)
# Timer to get envs every minute
self._get_envs_timer = QTimer(self)
self._get_envs_timer.setInterval(60000)
self._get_envs_timer.timeout.connect(self._get_envs)
self._get_envs_timer.start()
# Timer to check the current interpreter
self._check_interpreter_timer = QTimer(self)
self._check_interpreter_timer.setInterval(2000)
self._check_interpreter_timer.start(2000)
# Update the list of envs at startup
self._get_envs()
# ---- PluginMainContainer API
# -------------------------------------------------------------------------
def setup(self):
pass
def update_actions(self):
pass
@on_conf_change(option=['default', 'custom_interpreter', 'custom'])
def on_interpreter_changed(self, option, value):
if (option == "default" and value) or (
option == "custom" and not value
):
executable = get_python_executable()
else:
executable = ""
if self.get_conf("custom"):
custom_interpreter = self.get_conf('custom_interpreter')
if custom_interpreter:
executable = osp.normpath(custom_interpreter)
if osp.isfile(executable):
self.add_to_custom_interpreters(executable)
# Setting executable option that will be used by other plugins in
# Spyder.
if osp.isfile(executable) and executable != self.get_conf(
"executable"
):
self.set_conf('executable', executable)
@on_conf_change(option=['executable'])
def on_executable_changed(self, value):
# announce update
interpreter = self.get_main_interpreter()
self._update_interpreter(interpreter)
self.sig_interpreter_changed.emit(interpreter)
def on_close(self):
self._get_envs_timer.stop()
self._check_interpreter_timer.stop()
self._worker_manager.terminate_all()
self.set_conf("last_envs", self.envs)
# ---- Public API
# -------------------------------------------------------------------------
def get_main_interpreter(self):
return self.get_conf('executable', get_python_executable())
def add_to_custom_interpreters(self, interpreter):
"""Add a new interpreter to the list of saved ones."""
custom_list = self.get_conf('custom_interpreters_list')
if interpreter not in custom_list:
custom_list.append(interpreter)
self.set_conf('custom_interpreters_list', custom_list)
def validate_custom_interpreters_list(self):
"""Check that the used custom interpreters are still valid."""
custom_list = self.get_conf('custom_interpreters_list')
valid_custom_list = []
for value in custom_list:
if osp.isfile(value) and not is_conda_based_app(value):
valid_custom_list.append(value)
self.set_conf('custom_interpreters_list', valid_custom_list)
# ---- Private API
# -------------------------------------------------------------------------
def _get_envs(self):
"""
Get the list of environments in a thread to keep them up to date.
"""
# Save copy of current envs to compare it after they are updated
self._current_envs = self.envs.copy()
# Validate list of custom interpreters before updating them
self.validate_custom_interpreters_list()
# Update envs
worker = self._worker_manager.create_python_worker(self._update_envs)
worker.sig_finished.connect(self._finish_updating_envs)
worker.start()
def _update_envs(self):
"""Update environments."""
# Compute info of default interpreter. We only need to do this once (at
# startup).
if self._startup:
self._get_env_info(self.internal_interpreter)
self._startup = False
# Update custom envs
last_envs: dict[str, tuple[str, str]] = self.get_conf("last_envs")
if last_envs:
for env in last_envs:
if not(
last_envs[env][0] == self.internal_interpreter
or env.startswith("Conda")
or env.startswith("Pyenv")
):
path = last_envs[env][0]
if osp.isfile(path):
self._get_env_info(path)
else:
self.envs.pop(env)
# Update conda/pyenv envs
return get_list_envs()
def _finish_updating_envs(self, worker, output, error):
"""Finish updating environments."""
# This is necessary to avoid an error when the worker can't return a
# proper output.
# Fixes spyder-ide/spyder#20539
if output is not None:
# Update envs with autodetected info
for new_name, new_info in output.items():
if new_name in self.envs:
# The env is already listed, so we don't need to do
# anything else.
continue
elif (
new_name not in self.envs
and new_info in self.envs.values()
):
# Replace name of envs that are detected as Conda/Pyenv
# envs after running _update_envs, but were listed in
# self.envs differently.
for name, info in self.envs.copy().items():
if info == new_info:
self.envs.pop(name)
self.envs[new_name] = info
break
else:
# Add new env to the current ones
self.envs[new_name] = new_info
if self._current_envs != self.envs:
for env in list(self.envs.keys()):
path, version = self.envs[env]
# Save paths in lowercase on Windows to avoid issues with
# capitalization.
path = path.lower() if os.name == 'nt' else path
self.path_to_env[path] = env
self._update_interpreter()
self.sig_environments_updated.emit(self.envs)
def _update_interpreter(self, interpreter=None):
"""Set main interpreter and update information."""
if interpreter:
logger.debug(f"Main interpreter changed to {interpreter}")
self._interpreter = interpreter
if self._interpreter not in self.path_to_env:
worker = self._worker_manager.create_python_worker(
self._get_env_info,
self._interpreter
)
worker.start()
worker.sig_finished.connect(self._finish_updating_interpreter)
def _finish_updating_interpreter(self, worker, output, error):
if output is None or error:
return
# We need to inform about envs being updated in case a custom env was
# added in Preferences, which will update its info.
self.sig_environments_updated.emit(self.envs)
def _get_env_info(self, path):
"""Get environment information."""
with QMutexLocker(self._lock):
original_path = path
path = path.lower() if os.name == 'nt' else path
try:
name = self.path_to_env[path]
except KeyError:
env_name = get_env_dir(original_path, only_dir=True)
if (
# For Anaconda/Miniconda distros
"conda" in path.lower()
# For Mambaforge
or "mamba" in path.lower()
# For Miniforge
or "miniforge" in path.lower()
# For our installers
or (is_conda_based_app() and "spyder-runtime" in path)
):
name = 'Conda: ' + env_name
elif 'pyenv' in path:
name = 'Pyenv: ' + env_name
else:
name = _("Custom") + ": " + env_name
version = get_interpreter_info(path)
self.path_to_env[path] = name
self.envs[name] = (original_path, version)
__, version = self.envs[name]
return f'{name} ({version})'
def _check_interpreter(self):
"""
Switch to default interpreter if current env was removed or update
Python version of current one.
"""
env_dir = get_env_dir(self._interpreter)
if not osp.isdir(env_dir):
# Env was removed on Mac or Linux
self._on_interpreter_removed()
elif not osp.isfile(self._interpreter):
# This can happen on Windows because the interpreter was renamed to
# .conda_trash
if not osp.isdir(osp.join(env_dir, 'conda-meta')):
# If conda-meta is missing, it means the env was removed
self._on_interpreter_removed()
else:
# If not, it means the interpreter is being updated so we need
# to update its version
self._get_envs()
else:
# We need to do this in case the Python version was changed in the
# env
if self._interpreter in self.path_to_env:
self._update_interpreter()
def _on_interpreter_removed(self):
"""
Actions to take when the current custom interpreter is removed
outside Spyder.
"""
# NOTES:
# 1. The interpreter will be updated when the option changes below
# generate a change in the 'executable' ooption.
# 2. *Do not* change the order in which these options are set or the
# interpreter won't be updated correctly.
self.set_conf('custom_interpreter', ' ')
self.set_conf('custom', False)
self.set_conf('default', True)
|
MainInterpreterContainer
|
python
|
jina-ai__jina
|
jina/jaml/parsers/executor/legacy.py
|
{
"start": 222,
"end": 4515
}
|
class ____(BaseLegacyParser):
"""Legacy parser for executor."""
def parse(
self,
cls: Type['BaseExecutor'],
data: Dict,
runtime_args: Optional[Dict[str, Any]] = None,
) -> 'BaseExecutor':
"""
:param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
:param data: flow yaml file loaded as python dict
:param runtime_args: Optional runtime_args to be directly passed without being parsed into a yaml config
:return: the Flow YAML parser given the syntax version number
"""
from jina.logging.predefined import default_logger
_meta_config = get_default_metas()
_meta_config.update(data.get('metas', {}))
if _meta_config:
data['metas'] = _meta_config
cls._init_from_yaml = True
# tmp_p = {kk: expand_env_var(vv) for kk, vv in data.get('with', {}).items()}
if dataclasses.is_dataclass(cls):
obj = cls(
**data.get('with', {}),
)
cls.__bases__[0].__init__(
obj,
**data.get('with', {}),
metas=data.get('metas', {}),
requests=data.get('requests', {}),
dynamic_batching=data.get('dynamic_batching', {}),
runtime_args=runtime_args,
)
else:
obj = cls(
**data.get('with', {}),
metas=data.get('metas', {}),
requests=data.get('requests', {}),
dynamic_batching=data.get('dynamic_batching', {}),
runtime_args=runtime_args,
)
cls._init_from_yaml = False
# check if the yaml file used to instanciate 'cls' has arguments that are not in 'cls'
arguments_from_cls = ExecutorLegacyParser._get_all_arguments(cls)
arguments_from_yaml = set(data.get('with', {}))
difference_set = arguments_from_yaml - arguments_from_cls
# only log warnings about unknown args for main Pod
if any(difference_set) and not ExecutorLegacyParser.is_tail_or_head(data):
default_logger.warning(
f'The given arguments {difference_set} are not defined in `{cls.__name__}.__init__`'
)
if not _meta_config:
default_logger.warning(
'"metas" config is not found in this yaml file, '
'this map is important as it provides an unique identifier when '
'persisting the executor on disk.'
)
# for compound executor
if 'components' in data:
obj.components = lambda: data['components']
obj.is_updated = False
return obj
@staticmethod
def is_tail_or_head(data: Dict) -> bool:
"""Based on name, compute if this is a tail/head Pod or a main Pod
:param data: the data for the parser
:return: True if it is tail/head, False otherwise
"""
try:
name = data.get('runtime_args', {}).get('name', '')
return 'head' in name or 'tail' in name
except Exception as _:
pass # name can be None in tests since it's not passed
def dump(self, data: 'BaseExecutor') -> Dict:
"""
:param data: versioned executor object
:return: the dictionary given a versioned flow object
"""
# note: we only save non-default property for the sake of clarity
_defaults = get_default_metas()
p = (
{
k: getattr(data.metas, k)
for k, v in _defaults.items()
if getattr(data.metas, k) != v
}
if hasattr(data, 'metas')
else {}
)
a = {k: v for k, v in data._init_kwargs_dict.items() if k not in _defaults}
r = {}
if a:
r['with'] = a
if p:
r['metas'] = p
if hasattr(data, 'requests'):
r['requests'] = {k: v.fn.__name__ for k, v in data.requests.items()}
if hasattr(data, 'dynamic_batching'):
r['dynamic_batching'] = data.dynamic_batching
if hasattr(data, 'components'):
r['components'] = data.components
return r
|
ExecutorLegacyParser
|
python
|
davidhalter__jedi
|
jedi/inference/gradual/base.py
|
{
"start": 11251,
"end": 11969
}
|
class ____(ValueWrapper):
def py__stop_iteration_returns(self):
for cls in self._wrapped_value.class_value.py__mro__():
if cls.py__name__() == 'Generator':
generics = cls.get_generics()
try:
return generics[2].execute_annotation()
except IndexError:
pass
elif cls.py__name__() == 'Iterator':
return ValueSet([builtin_from_name(self.inference_state, 'None')])
return self._wrapped_value.py__stop_iteration_returns()
def get_type_hint(self, add_class_info=True):
return self._wrapped_value.class_value.get_type_hint(add_class_info=False)
|
_GenericInstanceWrapper
|
python
|
pennersr__django-allauth
|
allauth/account/views.py
|
{
"start": 42432,
"end": 46358
}
|
class ____(NextRedirectMixin, FormView):
form_class = VerifyPhoneForm
template_name = (
"account/confirm_phone_verification_code." + app_settings.TEMPLATE_EXTENSION
)
@cached_property
def _action(self):
action = self.request.POST.get("action")
valid_actions = ["verify"]
if self.process.can_change:
valid_actions.append("change")
if self.process.can_resend:
valid_actions.append("resend")
if action not in valid_actions:
action = "verify"
return action
def get_form_class(self):
if self._action == "change":
return self._get_change_form_class()
elif self._action == "resend":
return Form
return self._get_verify_form_class()
def _get_change_form_class(self):
return get_form_class(app_settings.FORMS, "change_phone", ChangePhoneForm)
def _get_verify_form_class(self):
return get_form_class(app_settings.FORMS, "verify_phone", self.form_class)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
if self._action == "change":
kwargs["phone"] = self.process.phone
kwargs["user"] = self.process.user
elif self._action == "resend":
pass
else:
kwargs["code"] = self.process.code
kwargs["phone"] = self.process.phone
kwargs["user"] = self.process.user
return kwargs
def form_valid(self, form):
if self._action == "change":
return self._change_form_valid(form)
elif self._action == "resend":
return self._resend_form_valid(form)
return self._verify_form_valid(form)
def _resend_form_valid(self, form):
try:
self.process.resend()
except RateLimited:
adapter = get_adapter()
adapter.add_message(
self.request,
messages.ERROR,
message=adapter.error_messages["rate_limited"],
)
return HttpResponseRedirect(
self.passthrough_next_url(reverse("account_verify_phone"))
)
def _change_form_valid(self, form):
self.process.change_to(form.cleaned_data["phone"], form.account_already_exists)
return HttpResponseRedirect(
self.passthrough_next_url(reverse("account_verify_phone"))
)
def _verify_form_valid(self, form):
self.process.finish()
return self.respond_process_succeeded(form)
def form_invalid(self, form):
if self._action == "change":
return self._change_form_invalid(form)
return self._verify_form_invalid(form)
def _change_form_invalid(self, form):
return super().form_invalid(form)
def _verify_form_invalid(self, form):
attempts_left = self.process.record_invalid_attempt()
if attempts_left:
return super().form_invalid(form)
self.process.abort()
return self.respond_process_failed(form)
def get_context_data(self, **kwargs):
ret = super().get_context_data(**kwargs)
ret["can_change"] = self.process.can_change
ret["can_resend"] = self.process.can_resend
site = get_current_site(self.request)
if self._action == "change":
ret["change_form"] = ret["form"]
ret["verify_form"] = self._get_verify_form_class()()
else:
ret["change_form"] = self._get_change_form_class()()
ret["verify_form"] = ret["form"]
ret.update(
{
"site": site,
"phone": self.process.phone,
"action": self._action,
}
)
return ret
@method_decorator(
login_stage_required(
stage=PhoneVerificationStage.key, redirect_urlname="account_login"
),
name="dispatch",
)
|
_BaseVerifyPhoneView
|
python
|
weaviate__weaviate-python-client
|
weaviate/groups/base.py
|
{
"start": 3449,
"end": 6160
}
|
class ____(Generic[ConnectionType], _BaseExecutor[ConnectionType]):
@overload
def get_assigned_roles(
self, *, group_id: str, include_permissions: Literal[False] = False
) -> executor.Result[Dict[str, RoleBase]]: ...
@overload
def get_assigned_roles(
self, *, group_id: str, include_permissions: Literal[True]
) -> executor.Result[Dict[str, Role]]: ...
@overload
def get_assigned_roles(
self,
*,
group_id: str,
include_permissions: bool = False,
) -> executor.Result[Union[Dict[str, Role], Dict[str, RoleBase]]]: ...
def get_assigned_roles(
self,
*,
group_id: str,
include_permissions: bool = False,
) -> executor.Result[Union[Dict[str, Role], Dict[str, RoleBase]]]:
"""Get the roles assigned to a group specific to the configured OIDC's dynamic auth functionality.
Args:
group_id: The group ID to get the roles for.
Returns:
A dictionary with role names as keys and the `Role` objects as values.
"""
return self._get_roles_of_group(
group_id,
USER_TYPE_OIDC,
include_permissions,
)
def assign_roles(
self,
*,
group_id: str,
role_names: Union[str, List[str]],
) -> executor.Result[None]:
"""Assign roles to a group specific to the configured OIDC's dynamic auth functionality.
Args:
role_names: The names of the roles to assign to the group.
group_id: The group to assign the roles to.
"""
return self._assign_roles_to_group(
[role_names] if isinstance(role_names, str) else role_names,
group_id,
USER_TYPE_OIDC,
)
def revoke_roles(
self,
*,
group_id: str,
role_names: Union[str, List[str]],
) -> executor.Result[None]:
"""Revoke roles from a group specific to the configured OIDC's dynamic auth functionality.
Args:
role_names: The names of the roles to revoke from the group.
group_id: The group to revoke the roles from.
"""
return self._revoke_roles_from_group(
[role_names] if isinstance(role_names, str) else role_names,
group_id,
USER_TYPE_OIDC,
)
def get_known_group_names(self) -> executor.Result[List[str]]:
"""Get the known group names specific to the configured OIDC's dynamic auth functionality.
Returns:
A list of known group names.
"""
return self._get_known_group_names(
USER_TYPE_OIDC,
)
|
_GroupsOIDCExecutor
|
python
|
astropy__astropy
|
astropy/visualization/stretch.py
|
{
"start": 25347,
"end": 27445
}
|
class ____(BaseStretch):
r"""
A stretch that takes into account contrast and bias.
The stretch is given by:
.. math::
y = (x - {\rm bias}) * {\rm contrast} + 0.5
and the output values are clipped to the [0:1] range.
Parameters
----------
contrast : float
The contrast parameter (see the above formula).
bias : float
The bias parameter (see the above formula).
Examples
--------
.. plot::
:show-source-link:
import numpy as np
from astropy.visualization import ContrastBiasStretch
from matplotlib import pyplot as plt
fig, ax = plt.subplots(figsize=(5, 5))
x = np.linspace(0, 1, 100)
contrasts = [1.0, 2.0, 0.7, 1.0, 1.0, 2.0]
biases = [0.5, 0.5, 0.5, 0.3, 0.7, 0.3]
for contrast, bias in zip(contrasts, biases):
stretch = ContrastBiasStretch(contrast, bias)
ax.plot(x, stretch(x, clip=True), label=f'{contrast=}, {bias=}')
ax.axis('equal')
ax.plot(x, x, ls='dotted', color='k', alpha=0.3)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.set_xlabel('Input Value')
ax.set_ylabel('Output Value')
ax.set_title(stretch.__class__.__name__)
ax.legend(loc='lower right', fontsize=8)
"""
def __init__(self, contrast, bias):
super().__init__()
self.contrast = contrast
self.bias = bias
def __call__(self, values, clip=True, out=None):
# As a special case here, we only clip *after* the
# transformation since it does not map [0:1] to [0:1]
values = _prepare(values, clip=False, out=out)
np.subtract(values, self.bias, out=values)
np.multiply(values, self.contrast, out=values)
np.add(values, 0.5, out=values)
if clip:
np.clip(values, 0, 1, out=values)
return values
@property
def inverse(self):
"""A stretch object that performs the inverse operation."""
return InvertedContrastBiasStretch(self.contrast, self.bias)
|
ContrastBiasStretch
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/core/test_dtype.py
|
{
"start": 11189,
"end": 11904
}
|
class ____(TestCase):
def test_simple(self):
class dt:
dtype = np.dtype("f8")
assert np.dtype(dt) == np.float64
assert np.dtype(dt()) == np.float64
@skip(
reason="We simply require the .name attribute, so this "
"fails with an AttributeError."
)
def test_recursion(self):
class dt:
pass
dt.dtype = dt
with pytest.raises(RecursionError):
np.dtype(dt)
dt_instance = dt()
dt_instance.dtype = dt
with pytest.raises(RecursionError):
np.dtype(dt_instance)
@skip(reason="Parametric dtypes, our stuff is simpler.")
@instantiate_parametrized_tests
|
TestFromDTypeAttribute
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/exc.py
|
{
"start": 25979,
"end": 26111
}
|
class ____(Base20DeprecationWarning):
"""indicates an API that is in 'legacy' status, a long term deprecation."""
|
LegacyAPIWarning
|
python
|
tensorflow__tensorflow
|
tensorflow/python/summary/plugin_asset_test.py
|
{
"start": 887,
"end": 1055
}
|
class ____(plugin_asset.PluginAsset):
"""An example asset with a dummy serialize method provided, but no name."""
def assets(self):
return {}
|
_UnnamedPluginAsset
|
python
|
huggingface__transformers
|
src/transformers/models/deepseek_vl_hybrid/modeling_deepseek_vl_hybrid.py
|
{
"start": 9257,
"end": 10749
}
|
class ____(PreTrainedModel):
config: DeepseekVLHybridConfig
base_model_prefix = "model"
input_modalities = ("image", "text")
supports_gradient_checkpointing = True
_no_split_modules = ["LlamaDecoderLayer"]
_skip_keys_device_placement = ["past_key_values", "causal_mask"]
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
init.normal_(module.weight, mean=0.0, std=self.config.text_config.initializer_range)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, nn.Conv2d):
init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, DeepseekVLHybridLayerNorm):
init.ones_(module.weight)
init.zeros_(module.bias)
elif isinstance(module, DeepseekVLHybridModel):
init.zeros_(module.high_res_vision_alpha)
DEEPSEEK_VL_COMMON_CUSTOM_ARGS = r"""
high_res_pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size), *optional*):
The tensors corresponding to the input images. Pixel values can be obtained using
[`AutoImageProcessor`].
"""
@auto_docstring
|
DeepseekVLHybridPreTrainedModel
|
python
|
tensorflow__tensorflow
|
tensorflow/python/training/input_test.py
|
{
"start": 16707,
"end": 17391
}
|
class ____(test_lib.TestCase):
def testListInputs(self):
l = [1, 2, 3, 11, 22, 33]
l2 = inp._as_tensor_list(l)
self.assertEqual(l, l2)
l3 = inp._as_original_type(l, l2)
self.assertEqual(l, l3)
def testDictInputs(self):
d = {"a": 1, "b": 2, "c": 3, "aa": 11, "bb": 22, "cc": 33}
l = inp._as_tensor_list(d)
self.assertEqual([1, 11, 2, 22, 3, 33], l)
d2 = inp._as_original_type(d, l)
self.assertEqual(d, d2)
def testHeterogeneousKeysDictInputs(self):
d = {"z": 1, 1: 42, ("a", "b"): 100}
l = inp._as_tensor_list(d)
self.assertEqual([100, 42, 1], l)
d2 = inp._as_original_type(d, l)
self.assertEqual(d, d2)
|
DictHelperTest
|
python
|
mlflow__mlflow
|
tests/store/tracking/test_plugin_validation.py
|
{
"start": 2902,
"end": 4153
}
|
class ____(SqlAlchemyStore):
pass
db_path = r"{db_path}"
artifact_path = r"{artifact_path}"
store = PluginStore(f"sqlite:///{{db_path}}", artifact_path)
dataset = store.create_dataset("test_dataset", tags={{"key": "value"}}, experiment_ids=[])
assert dataset is not None
assert dataset.name == "test_dataset"
"""
subprocess.check_call([sys.executable, "-c", code], timeout=20)
def test_evaluation_dataset_not_in_entities_all():
"""
Regression test for circular import issue (https://github.com/mlflow/mlflow/issues/18386).
EvaluationDataset must be excluded from mlflow.entities.__all__ to prevent wildcard imports
from triggering circular dependencies. When store plugins are loaded via entrypoints, any
code that uses "from mlflow.entities import *" would pull in EvaluationDataset, which has
dependencies that create import cycles with the store infrastructure.
This test ensures EvaluationDataset remains importable directly but isn't exposed through
wildcard imports, allowing plugins to safely inherit from store classes without encountering
circular import issues during initialization.
"""
import mlflow.entities
assert "EvaluationDataset" not in mlflow.entities.__all__
|
PluginStore
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/declarative_automation/operators/check_operators.py
|
{
"start": 4632,
"end": 6037
}
|
class ____(ChecksAutomationCondition):
@property
def base_name(self) -> str:
return "ANY_CHECKS_MATCH"
@property
def operator_type(self) -> OperatorType:
return "or"
async def evaluate(self, context: AutomationContext[AssetKey]) -> AutomationResult[AssetKey]: # pyright: ignore[reportIncompatibleMethodOverride]
true_subset = context.get_empty_subset()
coroutines = [
context.for_child_condition(
child_condition=EntityMatchesCondition(key=check_key, operand=self.operand),
child_indices=[
None,
i,
], # Prefer a non-indexed ID in case asset keys move around, but fall back to the indexed one for back-compat
candidate_subset=context.candidate_subset,
).evaluate_async()
for i, check_key in enumerate(
sorted(self._get_check_keys(context.key, context.asset_graph))
)
]
check_results = await asyncio.gather(*coroutines)
for check_result in check_results:
true_subset = true_subset.compute_union(check_result.true_subset)
true_subset = context.candidate_subset.compute_intersection(true_subset)
return AutomationResult(context, true_subset=true_subset, child_results=check_results)
@whitelist_for_serdes
@record
|
AnyChecksCondition
|
python
|
scipy__scipy
|
scipy/integrate/_ivp/rk.py
|
{
"start": 22150,
"end": 22800
}
|
class ____(DenseOutput):
def __init__(self, t_old, t, y_old, F):
super().__init__(t_old, t)
self.h = t - t_old
self.F = F
self.y_old = y_old
def _call_impl(self, t):
x = (t - self.t_old) / self.h
if t.ndim == 0:
y = np.zeros_like(self.y_old)
else:
x = x[:, None]
y = np.zeros((len(x), len(self.y_old)), dtype=self.y_old.dtype)
for i, f in enumerate(reversed(self.F)):
y += f
if i % 2 == 0:
y *= x
else:
y *= 1 - x
y += self.y_old
return y.T
|
Dop853DenseOutput
|
python
|
chroma-core__chroma
|
chromadb/test/property/test_collections_with_database_tenant_overwrite.py
|
{
"start": 793,
"end": 7713
}
|
class ____(
TenantDatabaseCollectionStateMachine
):
singleton_client: Client
singleton_admin_client: AdminAPI
root_client: Client
root_admin_client: AdminAPI
def __init__(
self,
singleton_client: Client,
root_client: Client,
client_factories: ClientFactories,
) -> None:
super().__init__(client_factories)
self.root_client = root_client
self.root_admin_client = self.admin_client
self.singleton_client = singleton_client
self.singleton_admin_client = AdminClient.from_system(singleton_client._system)
@initialize()
def initialize(self) -> None:
# Make sure we're back to the root client and admin client before
# doing reset/initialize things.
self.client = self.root_client
self.admin_client = self.root_admin_client
super().initialize()
self.root_admin_client.create_tenant(SINGLETON_TENANT)
self.root_admin_client.create_database(SINGLETON_DATABASE, SINGLETON_TENANT)
self.set_tenant_model(SINGLETON_TENANT, {})
self.set_database_model_for_tenant(SINGLETON_TENANT, SINGLETON_DATABASE, {})
@invariant()
def check_api_and_admin_client_are_in_sync(self) -> None:
if self.client == self.singleton_client:
assert self.admin_client == self.singleton_admin_client
else:
assert self.admin_client == self.root_admin_client
@rule()
def change_clients(self) -> None:
if self.client == self.singleton_client:
self.client = self.root_client
self.admin_client = self.root_admin_client
else:
self.client = self.singleton_client
self.admin_client = self.singleton_admin_client
@overrides
def set_api_tenant_database(self, tenant: str, database: str) -> None:
self.root_client.set_tenant(tenant, database)
@overrides
def get_tenant_model(
self, tenant: str
) -> Dict[str, Dict[str, Optional[types.CollectionMetadata]]]:
if self.client == self.singleton_client:
tenant = SINGLETON_TENANT
return self.tenant_to_database_to_model[tenant]
@overrides
def set_tenant_model(
self,
tenant: str,
model: Dict[str, Dict[str, Optional[types.CollectionMetadata]]],
) -> None:
if self.client == self.singleton_client:
# This never happens because we never actually issue a
# create_tenant call on singleton_tenant:
# thanks to the above overriding of get_tenant_model(),
# the underlying state machine test should always expect an error
# when it sends the request, so shouldn't try to update the model.
raise ValueError("trying to overwrite the model for singleton??")
self.tenant_to_database_to_model[tenant] = model
@overrides
def set_database_model_for_tenant(
self,
tenant: str,
database: str,
database_model: Dict[str, Optional[types.CollectionMetadata]],
) -> None:
if self.client == self.singleton_client:
# This never happens because we never actually issue a
# create_database call on (singleton_tenant, singleton_database):
# thanks to the above overriding of has_database_for_tenant(),
# the underlying state machine test should always expect an error
# when it sends the request, so shouldn't try to update the model.
raise ValueError("trying to overwrite the model for singleton??")
self.tenant_to_database_to_model[tenant][database] = database_model
@overrides
def overwrite_database(self, database: str) -> str:
if self.client == self.singleton_client:
return SINGLETON_DATABASE
return database
@overrides
def overwrite_tenant(self, tenant: str) -> str:
if self.client == self.singleton_client:
return SINGLETON_TENANT
return tenant
@property
def model(self) -> Dict[str, Optional[types.CollectionMetadata]]:
if self.client == self.singleton_client:
return self.tenant_to_database_to_model[SINGLETON_TENANT][
SINGLETON_DATABASE
]
return self.tenant_to_database_to_model[self.curr_tenant][self.curr_database]
def _singleton_and_root_clients() -> Tuple[Client, Client, ClientFactories]:
api_fixture = fastapi_fixture_admin_and_singleton_tenant_db_user()
sys: System = next(api_fixture)
sys.reset_state()
client_factories = ClientFactories(sys)
root_client = client_factories.create_client()
_root_admin_client = client_factories.create_admin_client_from_system()
# This is a little awkward but we have to create the tenant and DB
# before we can instantiate a Client which connects to them. This also
# means we need to manually populate state in the state machine.
_root_admin_client.create_tenant(SINGLETON_TENANT)
_root_admin_client.create_database(SINGLETON_DATABASE, SINGLETON_TENANT)
singleton_settings = Settings(**dict(sys.settings))
singleton_settings.chroma_client_auth_credentials = "singleton-token"
singleton_system = System(singleton_settings)
singleton_system.start()
singleton_client = Client.from_system(singleton_system)
return singleton_client, root_client, client_factories
def test_collections_with_tenant_database_overwrite(
caplog: pytest.LogCaptureFixture,
) -> None:
caplog.set_level(logging.ERROR)
singleton_client, root_client, client_factories = _singleton_and_root_clients()
run_state_machine_as_test(
lambda: SingletonTenantDatabaseCollectionStateMachine(
singleton_client, root_client, client_factories
)
) # type: ignore
def test_repeat_failure(
caplog: pytest.LogCaptureFixture,
) -> None:
caplog.set_level(logging.ERROR)
singleton_client, root_client, client_factories = _singleton_and_root_clients()
state = SingletonTenantDatabaseCollectionStateMachine(
singleton_client, root_client, client_factories
)
state.initialize()
state.check_api_and_admin_client_are_in_sync()
state.change_clients()
state.check_api_and_admin_client_are_in_sync()
state.create_coll(
coll=strategies.Collection(
name="A00",
metadata=None,
embedding_function=strategies.hashing_embedding_function(
dim=2, dtype=numpy.float16 # type: ignore
),
id=uuid.UUID("c9bcb72f-92b1-4604-a8cb-084162dfe98b"),
dimension=2,
dtype=numpy.float16,
known_metadata_keys={},
known_document_keywords=[],
has_documents=False,
has_embeddings=True,
)
)
state.teardown() # type: ignore
|
SingletonTenantDatabaseCollectionStateMachine
|
python
|
great-expectations__great_expectations
|
great_expectations/exceptions/resource_freshness.py
|
{
"start": 2275,
"end": 2594
}
|
class ____(ResourceFreshnessError):
def __init__(self, name: str) -> None:
super().__init__(
f"BatchDefinition '{name}' has changed since it has last been saved. "
"Please update using the parent asset or data source, then try your action again."
)
|
BatchDefinitionNotFreshError
|
python
|
numba__numba
|
numba/core/typing/builtins.py
|
{
"start": 28145,
"end": 28617
}
|
class ____(AbstractTemplate):
def generic(self, args, kws):
assert not kws
[arg] = args
if isinstance(arg, (types.Boolean, types.Number)):
return signature(types.boolean, arg)
# XXX typing for bool cannot be polymorphic because of the
# types.Function thing, so we redirect to the operator.truth
# intrinsic.
return self.context.resolve_function_type(operator.truth, args, kws)
@infer_global(int)
|
Bool
|
python
|
tox-dev__tox
|
src/tox/config/of_type.py
|
{
"start": 2249,
"end": 4802
}
|
class ____(ConfigDefinition[T]): # noqa: PLW1641
"""A configuration definition that comes from a source (such as in memory, an ini file, a toml file, etc.)."""
def __init__( # noqa: PLR0913
self,
keys: Iterable[str],
desc: str,
of_type: type[T] | UnionType,
default: Callable[[Config, str | None], T] | T,
post_process: Callable[[T], T] | None = None,
factory: Factory[T] | None = None,
) -> None:
super().__init__(keys, desc)
self.of_type = of_type
self.default = default
self.post_process = post_process
self.factory = factory
self._cache: object | T = _PLACE_HOLDER
def __call__(
self,
conf: Config,
loaders: list[Loader[T]],
args: ConfigLoadArgs,
) -> T:
if self._cache is _PLACE_HOLDER:
for key, loader in product(self.keys, loaders):
chain_key = f"{loader.section.key}.{key}"
try:
if chain_key in args.chain:
values = args.chain[args.chain.index(chain_key) :]
msg = f"circular chain detected {', '.join(values)}"
raise CircularChainError(msg)
finally:
args.chain.append(chain_key)
try:
value = loader.load(key, self.of_type, self.factory, conf, args)
except KeyError:
continue
else:
break
finally:
del args.chain[-1]
else:
value = self.default(conf, args.env_name) if callable(self.default) else self.default
if self.post_process is not None:
value = self.post_process(value)
self._cache = value
return cast("T", self._cache)
def __repr__(self) -> str:
values = ((k, v) for k, v in vars(self).items() if k not in {"post_process", "_cache"} and v is not None)
return f"{type(self).__name__}({', '.join(f'{k}={v}' for k, v in values)})"
def __eq__(self, o: object) -> bool:
return (
type(self) == type(o) # noqa: E721
and super().__eq__(o)
and (self.of_type, self.default, self.post_process) == (o.of_type, o.default, o.post_process) # type: ignore[attr-defined]
)
__all__ = [
"ConfigConstantDefinition",
"ConfigDefinition",
"ConfigDynamicDefinition",
"ConfigLoadArgs",
]
|
ConfigDynamicDefinition
|
python
|
django__django
|
tests/serializers/models/base.py
|
{
"start": 1945,
"end": 2166
}
|
class ____(models.Model):
author = models.OneToOneField(Author, models.CASCADE, primary_key=True)
date_of_birth = models.DateField()
def __str__(self):
return "Profile of %s" % self.author
|
AuthorProfile
|
python
|
pytorch__pytorch
|
torch/backends/cuda/__init__.py
|
{
"start": 2509,
"end": 3797
}
|
class ____:
r"""
Represent all cuFFT plan caches, return the cuFFTPlanCache for a given device when indexed.
Finally, this object, when used directly as a `cuFFTPlanCache` object (e.g.,
setting the `.max_size`) attribute, the current device's cuFFT plan cache is
used.
"""
__initialized = False
def __init__(self):
self.caches = []
self.__initialized = True
def __getitem__(self, device):
index = torch.cuda._utils._get_device_index(device)
if index < 0 or index >= torch.cuda.device_count():
raise RuntimeError(
f"cufft_plan_cache: expected 0 <= device index < {torch.cuda.device_count()}, but got "
f"device with index {index}"
)
if len(self.caches) == 0:
self.caches.extend(
cuFFTPlanCache(index) for index in range(torch.cuda.device_count())
)
return self.caches[index]
def __getattr__(self, name):
return getattr(self[torch.cuda.current_device()], name)
def __setattr__(self, name, value):
if self.__initialized:
return setattr(self[torch.cuda.current_device()], name, value)
else:
return super().__setattr__(name, value)
|
cuFFTPlanCacheManager
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/destinations.py
|
{
"start": 118974,
"end": 119692
}
|
class ____(GeneratedAirbyteDestination):
@public
def __init__(self, name: str, host: str, api_key: Optional[str] = None):
"""Airbyte Destination for Meilisearch.
Documentation can be found at https://docs.airbyte.com/integrations/destinations/meilisearch
Args:
name (str): The name of the destination.
host (str): Hostname of the MeiliSearch instance.
api_key (Optional[str]): MeiliSearch API Key. See the docs for more information on how to obtain this key.
"""
self.host = check.str_param(host, "host")
self.api_key = check.opt_str_param(api_key, "api_key")
super().__init__("Meilisearch", name)
|
MeilisearchDestination
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_cycles.py
|
{
"start": 46472,
"end": 47584
}
|
class ____(fixtures.DeclarativeMappedTest):
"""test #12748"""
run_setup_classes = "each"
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
count = 0
def _counter():
nonlocal count
count += 1
return count
class Parent(Base):
__tablename__ = "parent"
id = mapped_column(Integer, primary_key=True)
related = relationship("Related", post_update=True)
class Related(Base):
__tablename__ = "related"
id = mapped_column(Integer, primary_key=True)
parent_id = mapped_column(ForeignKey("parent.id"))
counter = mapped_column(Integer, onupdate=_counter)
def test_update_counter(self, connection):
Parent, Related = self.classes("Parent", "Related")
p1 = Parent(related=[Related(), Related(), Related()])
with Session(connection, expire_on_commit=False) as sess:
sess.add(p1)
sess.commit()
eq_([rel.counter for rel in p1.related], [1, 2, 3])
|
PostUpdatePrefetchTest
|
python
|
Textualize__textual
|
tests/test_binding_inheritance.py
|
{
"start": 14267,
"end": 14684
}
|
class ____(Static, can_focus=True, inherit_bindings=False):
"""A widget that has its own bindings for the movement keys, no binding inheritance."""
BINDINGS = AppKeyRecorder.make_bindings("local_")
async def action_local_record(self, key: str) -> None:
# Sneaky forward reference. Just for the purposes of testing.
await self.app.action_record(f"locally_{key}")
|
WidgetWithBindingsNoInherit
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/mysql/types.py
|
{
"start": 766,
"end": 1118
}
|
class ____:
"""Base for MySQL numeric types.
This is the base both for NUMERIC as well as INTEGER, hence
it's a mixin.
"""
def __init__(
self, unsigned: bool = False, zerofill: bool = False, **kw: Any
):
self.unsigned = unsigned
self.zerofill = zerofill
super().__init__(**kw)
|
_NumericCommonType
|
python
|
django__django
|
tests/migrations/migrations_test_apps/alter_fk/author_app/migrations/0002_alter_id.py
|
{
"start": 43,
"end": 421
}
|
class ____(migrations.Migration):
dependencies = [
("author_app", "0001_initial"),
("book_app", "0001_initial"), # Forces the book table to alter the FK
]
operations = [
migrations.AlterField(
model_name="author",
name="id",
field=models.CharField(max_length=10, primary_key=True),
),
]
|
Migration
|
python
|
getsentry__sentry
|
src/sentry/discover/endpoints/discover_key_transactions.py
|
{
"start": 5632,
"end": 6534
}
|
class ____(KeyTransactionBase):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
permission_classes = (KeyTransactionPermission,)
def get(self, request: Request, organization: Organization) -> Response:
if not self.has_feature(organization, request):
return Response(status=404)
try:
teams = get_teams(request, organization)
except InvalidParams as err:
return Response(str(err), status=400)
projects = self.get_projects(request, organization)
serializer = KeyTransactionTeamSerializer(projects)
return self.paginate(
request=request,
queryset=teams,
order_by="slug",
on_results=lambda x: serialize(x, request.user, serializer),
paginator_cls=OffsetPaginator,
)
@register(TeamKeyTransaction)
|
KeyTransactionListEndpoint
|
python
|
walkccc__LeetCode
|
solutions/3159. Find Occurrences of an Element in an Array/3159.py
|
{
"start": 0,
"end": 295
}
|
class ____:
def occurrencesOfElement(
self,
nums: list[int],
queries: list[int],
x: int,
) -> list[int]:
indices = [i for i, num in enumerate(nums) if num == x]
return [indices[query - 1] if query <= len(indices) else -1
for query in queries]
|
Solution
|
python
|
pyinstaller__pyinstaller
|
PyInstaller/utils/osx.py
|
{
"start": 12999,
"end": 31254
}
|
class ____(Exception):
"""
Exception raised by `binary_to_target_arch` when the passed binary fails the strict architecture check.
"""
def __init__(self, message):
url = "https://pyinstaller.org/en/stable/feature-notes.html#macos-multi-arch-support"
super().__init__(f"{message} For details about this error message, see: {url}")
def get_binary_architectures(filename):
"""
Inspects the given binary and returns tuple (is_fat, archs), where is_fat is boolean indicating fat/thin binary,
and arch is list of architectures with lipo/codesign compatible names.
"""
try:
executable = MachO(filename)
except ValueError as e:
raise InvalidBinaryError("Invalid Mach-O binary!") from e
return bool(executable.fat), [_get_arch_string(hdr.header) for hdr in executable.headers]
def convert_binary_to_thin_arch(filename, thin_arch, output_filename=None):
"""
Convert the given fat binary into thin one with the specified target architecture.
"""
output_filename = output_filename or filename
cmd_args = ['lipo', '-thin', thin_arch, filename, '-output', output_filename]
p = subprocess.run(cmd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8')
if p.returncode:
raise SystemError(f"lipo command ({cmd_args}) failed with error code {p.returncode}!\noutput: {p.stdout}")
def merge_into_fat_binary(output_filename, *slice_filenames):
"""
Merge the given single-arch thin binary files into a fat binary.
"""
cmd_args = ['lipo', '-create', '-output', output_filename, *slice_filenames]
p = subprocess.run(cmd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8')
if p.returncode:
raise SystemError(f"lipo command ({cmd_args}) failed with error code {p.returncode}!\noutput: {p.stdout}")
def binary_to_target_arch(filename, target_arch, display_name=None):
"""
Check that the given binary contains required architecture slice(s) and convert the fat binary into thin one,
if necessary.
"""
if not display_name:
display_name = filename # Same as input file
# Check the binary
is_fat, archs = get_binary_architectures(filename)
if target_arch == 'universal2':
if not is_fat:
raise IncompatibleBinaryArchError(f"{display_name} is not a fat binary!")
# Assume fat binary is universal2; nothing to do
else:
if is_fat:
if target_arch not in archs:
raise IncompatibleBinaryArchError(f"{display_name} does not contain slice for {target_arch}!")
# Convert to thin arch
logger.debug("Converting fat binary %s (%s) to thin binary (%s)", filename, display_name, target_arch)
convert_binary_to_thin_arch(filename, target_arch)
else:
if target_arch not in archs:
raise IncompatibleBinaryArchError(
f"{display_name} is incompatible with target arch {target_arch} (has arch: {archs[0]})!"
)
# Binary has correct arch; nothing to do
def remove_signature_from_binary(filename):
"""
Remove the signature from all architecture slices of the given binary file using the codesign utility.
"""
logger.debug("Removing signature from file %r", filename)
cmd_args = ['/usr/bin/codesign', '--remove', '--all-architectures', filename]
p = subprocess.run(cmd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8')
if p.returncode:
raise SystemError(f"codesign command ({cmd_args}) failed with error code {p.returncode}!\noutput: {p.stdout}")
def sign_binary(filename, identity=None, entitlements_file=None, deep=False):
"""
Sign the binary using codesign utility. If no identity is provided, ad-hoc signing is performed.
"""
extra_args = []
if not identity:
identity = '-' # ad-hoc signing
else:
extra_args.append('--options=runtime') # hardened runtime
if entitlements_file:
extra_args.append('--entitlements')
extra_args.append(entitlements_file)
if deep:
extra_args.append('--deep')
logger.debug("Signing file %r", filename)
cmd_args = [
'/usr/bin/codesign', '-s', identity, '--force', '--all-architectures', '--timestamp', *extra_args, filename
]
p = subprocess.run(cmd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8')
if p.returncode:
raise SystemError(f"codesign command ({cmd_args}) failed with error code {p.returncode}!\noutput: {p.stdout}")
def set_dylib_dependency_paths(filename, target_rpath):
"""
Modify the given dylib's identity (in LC_ID_DYLIB command) and the paths to dependent dylibs (in LC_LOAD_DYLIB)
commands into `@rpath/<basename>` format, remove any existing rpaths (LC_RPATH commands), and add a new rpath
(LC_RPATH command) with the specified path.
Uses `install-tool-name` utility to make the changes.
The system libraries (e.g., the ones found in /usr/lib) are exempted from path rewrite.
For multi-arch fat binaries, this function extracts each slice into temporary file, processes it separately,
and then merges all processed slices back into fat binary. This is necessary because `install-tool-name` cannot
modify rpaths in cases when an existing rpath is present only in one slice.
"""
# Check if we are dealing with a fat binary; the `install-name-tool` seems to be unable to remove an rpath that is
# present only in one slice, so we need to extract each slice, process it separately, and then stich processed
# slices back into a fat binary.
is_fat, archs = get_binary_architectures(filename)
if is_fat:
with tempfile.TemporaryDirectory() as tmpdir:
slice_filenames = []
for arch in archs:
slice_filename = os.path.join(tmpdir, arch)
convert_binary_to_thin_arch(filename, arch, output_filename=slice_filename)
_set_dylib_dependency_paths(slice_filename, target_rpath)
slice_filenames.append(slice_filename)
merge_into_fat_binary(filename, *slice_filenames)
else:
# Thin binary - we can process it directly
_set_dylib_dependency_paths(filename, target_rpath)
def _set_dylib_dependency_paths(filename, target_rpath):
"""
The actual implementation of set_dylib_dependency_paths functionality.
Implicitly assumes that a single-arch thin binary is given.
"""
# Relocatable commands that we should overwrite - same list as used by `macholib`.
_RELOCATABLE = {
LC_LOAD_DYLIB,
LC_LOAD_UPWARD_DYLIB,
LC_LOAD_WEAK_DYLIB,
LC_PREBOUND_DYLIB,
LC_REEXPORT_DYLIB,
}
# Parse dylib's header to extract the following commands:
# - LC_LOAD_DYLIB (or any member of _RELOCATABLE list): dylib load commands (dependent libraries)
# - LC_RPATH: rpath definitions
# - LC_ID_DYLIB: dylib's identity
binary = MachO(filename)
dylib_id = None
rpaths = set()
linked_libs = set()
for header in binary.headers:
for cmd in header.commands:
lc_type = cmd[0].cmd
if lc_type not in _RELOCATABLE and lc_type not in {LC_RPATH, LC_ID_DYLIB}:
continue
# Decode path, strip trailing NULL characters
path = cmd[2].decode('utf-8').rstrip('\x00')
if lc_type in _RELOCATABLE:
linked_libs.add(path)
elif lc_type == LC_RPATH:
rpaths.add(path)
elif lc_type == LC_ID_DYLIB:
dylib_id = path
del binary
# If dylib has identifier set, compute the normalized version, in form of `@rpath/basename`.
normalized_dylib_id = None
if dylib_id:
normalized_dylib_id = str(pathlib.PurePath('@rpath') / pathlib.PurePath(dylib_id).name)
# Find dependent libraries that should have their prefix path changed to `@rpath`. If any dependent libraries
# end up using `@rpath` (originally or due to rewrite), set the `rpath_required` boolean to True, so we know
# that we need to add our rpath.
changed_lib_paths = []
rpath_required = False
for linked_lib in linked_libs:
# Leave system dynamic libraries unchanged.
if macholib.util.in_system_path(linked_lib):
continue
# The older python.org builds that use system Tcl/Tk framework have their _tkinter.cpython-*-darwin.so
# library linked against /Library/Frameworks/Tcl.framework/Versions/8.5/Tcl and
# /Library/Frameworks/Tk.framework/Versions/8.5/Tk, although the actual frameworks are located in
# /System/Library/Frameworks. Therefore, they slip through the above in_system_path() check, and we need to
# exempt them manually.
_exemptions = [
'/Library/Frameworks/Tcl.framework/',
'/Library/Frameworks/Tk.framework/',
]
if any([x in linked_lib for x in _exemptions]):
continue
# This linked library will end up using `@rpath`, whether modified or not...
rpath_required = True
new_path = str(pathlib.PurePath('@rpath') / pathlib.PurePath(linked_lib).name)
if linked_lib == new_path:
continue
changed_lib_paths.append((linked_lib, new_path))
# Gather arguments for `install-name-tool`
install_name_tool_args = []
# Modify the dylib identifier if necessary
if normalized_dylib_id and normalized_dylib_id != dylib_id:
install_name_tool_args += ["-id", normalized_dylib_id]
# Changed libs
for original_path, new_path in changed_lib_paths:
install_name_tool_args += ["-change", original_path, new_path]
# Remove all existing rpaths except for the target rpath (if it already exists). `install_name_tool` disallows using
# `-delete_rpath` and `-add_rpath` with the same argument.
for rpath in rpaths:
if rpath == target_rpath:
continue
install_name_tool_args += [
"-delete_rpath",
rpath,
]
# If any of linked libraries use @rpath now and our target rpath is not already added, add it.
# NOTE: @rpath in the dylib identifier does not actually require the rpath to be set on the binary...
if rpath_required and target_rpath not in rpaths:
install_name_tool_args += [
"-add_rpath",
target_rpath,
]
# If we have no arguments, finish immediately.
if not install_name_tool_args:
return
# Run `install_name_tool`
cmd_args = ["install_name_tool", *install_name_tool_args, filename]
p = subprocess.run(cmd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8')
if p.returncode:
raise SystemError(
f"install_name_tool command ({cmd_args}) failed with error code {p.returncode}!\noutput: {p.stdout}"
)
def is_framework_bundle_lib(lib_path):
"""
Check if the given shared library is part of a .framework bundle.
"""
lib_path = pathlib.PurePath(lib_path)
# For now, focus only on versioned layout, such as `QtCore.framework/Versions/5/QtCore`
if lib_path.parent.parent.name != "Versions":
return False
if lib_path.parent.parent.parent.name != lib_path.name + ".framework":
return False
return True
def collect_files_from_framework_bundles(collected_files):
"""
Scan the given TOC list of collected files for shared libraries that are collected from macOS .framework bundles,
and collect the bundles' Info.plist files. Additionally, the following symbolic links:
- `Versions/Current` pointing to the `Versions/<version>` directory containing the binary
- `<name>` in the top-level .framework directory, pointing to `Versions/Current/<name>`
- `Resources` in the top-level .framework directory, pointing to `Versions/Current/Resources`
- additional directories in top-level .framework directory, pointing to their counterparts in `Versions/Current`
directory.
Returns TOC list for the discovered Info.plist files and generated symbolic links. The list does not contain
duplicated entries.
"""
invalid_framework_found = False
framework_files = set() # Additional entries for collected files. Use set for de-duplication.
framework_paths = set() # Registered framework paths for 2nd pass.
# 1st pass: discover binaries from .framework bundles, and for each such binary:
# - collect `Info.plist`
# - create `Current` -> `<version>` symlink in `<name>.framework/Versions` directory.
# - create `<name>.framework/<name>` -> `<name>.framework/Versions/Current/<name>` symlink.
# - create `<name>.framework/Resources` -> `<name>.framework/Versions/Current/Resources` symlink.
for dest_name, src_name, typecode in collected_files:
if typecode != 'BINARY':
continue
src_path = pathlib.Path(src_name) # /src/path/to/<name>.framework/Versions/<version>/<name>
dest_path = pathlib.PurePath(dest_name) # /dest/path/to/<name>.framework/Versions/<version>/<name>
# Check whether binary originates from a .framework bundle
if not is_framework_bundle_lib(src_path):
continue
# Check whether binary is also collected into a .framework bundle (i.e., the original layout is preserved)
if not is_framework_bundle_lib(dest_path):
continue
# Assuming versioned layout, Info.plist should exist in Resources directory located next to the binary.
info_plist_src = src_path.parent / "Resources" / "Info.plist"
if not info_plist_src.is_file():
# Alas, the .framework bundles shipped with PySide/PyQt might have Info.plist available only in the
# top-level Resources directory. So accommodate this scenario as well, but collect the file into
# versioned directory to appease the code-signing gods...
info_plist_src_top = src_path.parent.parent.parent / "Resources" / "Info.plist"
if not info_plist_src_top.is_file():
# Strictly speaking, a .framework bundle without Info.plist is invalid. However, that did not prevent
# PyQt from shipping such Qt .framework bundles up until v5.14.1. So by default, we just complain via
# a warning message; if such binaries work in unfrozen python, they should also work in frozen
# application. The codesign will refuse to sign the .app bundle (if we are generating one), but there
# is nothing we can do about that.
invalid_framework_found = True
framework_dir = src_path.parent.parent.parent
if compat.strict_collect_mode:
raise SystemError(f"Could not find Info.plist in {framework_dir}!")
else:
logger.warning("Could not find Info.plist in %s!", framework_dir)
continue
info_plist_src = info_plist_src_top
info_plist_dest = dest_path.parent / "Resources" / "Info.plist"
framework_files.add((str(info_plist_dest), str(info_plist_src), "DATA"))
# Reconstruct the symlink Versions/Current -> Versions/<version>.
# This one seems to be necessary for code signing, but might be absent from .framework bundles shipped with
# python packages. So we always create it ourselves.
framework_files.add((str(dest_path.parent.parent / "Current"), str(dest_path.parent.name), "SYMLINK"))
dest_framework_path = dest_path.parent.parent.parent # Top-level .framework directory path.
# Symlink the binary in the `Current` directory to the top-level .framework directory.
framework_files.add((
str(dest_framework_path / dest_path.name),
str(pathlib.PurePath("Versions/Current") / dest_path.name),
"SYMLINK",
))
# Ditto for the `Resources` directory.
framework_files.add((
str(dest_framework_path / "Resources"),
"Versions/Current/Resources",
"SYMLINK",
))
# Register the framework parent path to use in additional directories scan in subsequent pass.
framework_paths.add(dest_framework_path)
# 2nd pass: scan for additional collected directories from .framework bundles, and create symlinks to the top-level
# application directory. Make the outer loop go over the registered framework paths, so it becomes no-op if no
# framework paths are registered.
VALID_SUBDIRS = {'Helpers', 'Resources'}
for dest_framework_path in framework_paths:
for dest_name, src_name, typecode in collected_files:
dest_path = pathlib.PurePath(dest_name)
# Try matching against framework path
try:
remaining_path = dest_path.relative_to(dest_framework_path)
except ValueError: # dest_path is not subpath of dest_framework_path
continue
remaining_path_parts = remaining_path.parts
# We are interested only in entries under Versions directory.
if remaining_path_parts[0] != 'Versions':
continue
# If the entry name is among valid sub-directory names, create symlink.
dir_name = remaining_path_parts[2]
if dir_name not in VALID_SUBDIRS:
continue
framework_files.add((
str(dest_framework_path / dir_name),
str(pathlib.PurePath("Versions/Current") / dir_name),
"SYMLINK",
))
# If we encountered an invalid .framework bundle without Info.plist, warn the user that code-signing will most
# likely fail.
if invalid_framework_found:
logger.warning(
"One or more collected .framework bundles have missing Info.plist file. If you are building an .app "
"bundle, you will most likely not be able to code-sign it."
)
return sorted(framework_files)
|
IncompatibleBinaryArchError
|
python
|
spack__spack
|
lib/spack/spack/vendor/ruamel/yaml/comments.py
|
{
"start": 23769,
"end": 24160
}
|
class ____(CommentedMapView):
__slots__ = ()
def __contains__(self, value):
# type: (Any) -> Any
for key in self._mapping:
if value == self._mapping[key]:
return True
return False
def __iter__(self):
# type: () -> Any
for key in self._mapping._keys():
yield self._mapping[key]
|
CommentedMapValuesView
|
python
|
pytorch__pytorch
|
torch/_inductor/codegen/common.py
|
{
"start": 8418,
"end": 8778
}
|
class ____:
scheduling: SchedulingConstructor
wrapper_codegen: WrapperConstructor
cpp_wrapper_codegen: Optional[WrapperConstructor] = None
fx_wrapper_codegen: Optional[WrapperConstructor] = None
KernelArgType = Union[WorkspaceArg, TensorArg, SizeArg, TMADescriptorArg, ConstexprArg]
device_codegens: dict[str, DeviceCodegen] = {}
|
DeviceCodegen
|
python
|
google__pytype
|
pytype/tests/test_dataclass_transform.py
|
{
"start": 3050,
"end": 7196
}
|
class ____(test_base.BaseTest):
"""Tests for @dataclass_transform on classes."""
def test_single_inheritance(self):
self.CheckWithErrors("""
from typing_extensions import dataclass_transform
@dataclass_transform()
class Base: ...
class A(Base):
x: int
y: str
class B(A):
z: int
a = B(1, '2', 3)
b = B(1, 2) # missing-parameter
c = B(1, 2, 3) # wrong-arg-types
""")
def test_multiple_inheritance(self):
self.CheckWithErrors("""
from typing_extensions import dataclass_transform
@dataclass_transform()
class Mixin: ...
class Base:
pass
class A(Base, Mixin):
x: int
y: str
class B(A):
z: int
a = B(1, '2', 3)
b = B(1, 2) # missing-parameter
c = B(1, 2, 3) # wrong-arg-types
""")
def test_redundant_decorator(self):
self.CheckWithErrors("""
import dataclasses
from typing_extensions import dataclass_transform
@dataclass_transform()
class Base: ...
@dataclasses.dataclass
class A(Base):
x: int
y: str
class B(A):
z: int
a = B(1, '2', 3)
b = B(1, 2) # missing-parameter
c = B(1, 2, 3) # wrong-arg-types
""")
def test_redundant_decorator_pyi(self):
ty = self.Infer("""
import dataclasses
from typing_extensions import dataclass_transform
@dataclass_transform()
class A:
pass
@dataclasses.dataclass
class B(A):
x: int
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import dataclass_transform
@dataclass_transform
class A: ...
@dataclasses.dataclass
class B(A):
x: int
def __init__(self, x: int) -> None: ...
""",
)
def test_write_pyi(self):
ty = self.Infer("""
from typing_extensions import dataclass_transform
@dataclass_transform()
class Mixin: ...
class Base:
pass
class A(Base, Mixin):
x: int
y: str
class B(A):
z: int
""")
self.assertTypesMatchPytd(
ty,
"""
import dataclasses
from typing import dataclass_transform
@dataclasses.dataclass
class A(Base, Mixin):
x: int
y: str
def __init__(self, x: int, y: str) -> None: ...
@dataclasses.dataclass
class B(A):
z: int
def __init__(self, x: int, y: str, z: int) -> None: ...
class Base: ...
@dataclass_transform
class Mixin: ...
""",
)
def test_pyi_class(self):
with self.DepTree([(
"foo.pyi",
"""
from typing import dataclass_transform
@dataclass_transform
class Mixin:
...
""",
)]):
self.CheckWithErrors("""
import foo
class Base(foo.Mixin):
x: int
class A(Base):
y: str
a = A(x=10, y='foo')
b = A(10) # missing-parameter
c = A(10, 20) # wrong-arg-types
""")
def test_reingest(self):
with self.DepTree([(
"foo.py",
"""
from typing_extensions import dataclass_transform
@dataclass_transform()
class Mixin:
pass
""",
)]):
self.CheckWithErrors("""
import foo
class Base(foo.Mixin):
x: int
class A(Base):
y: str
a = A(x=10, y='foo')
b = A(10) # missing-parameter
c = A(10, 20) # wrong-arg-types
""")
def test_init_subclass_impl(self):
with self.DepTree([(
"foo.py",
"""
import dataclasses
from typing_extensions import dataclass_transform
@dataclass_transform()
class X:
def __init_subclass__(cls):
return dataclasses.dataclass(cls)
""",
)]):
self.CheckWithErrors("""
import foo
class Y(foo.X):
x: int
Y() # missing-parameter
Y(x=0) # ok
Y(x='') # wrong-arg-types
""")
|
TestClass
|
python
|
apache__airflow
|
providers/microsoft/azure/src/airflow/providers/microsoft/azure/operators/powerbi.py
|
{
"start": 6409,
"end": 8930
}
|
class ____(BaseOperator):
"""
Gets a list of workspaces where the service principal from the connection is assigned as admin.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PowerBIWorkspaceListOperator`
:param conn_id: The connection Id to connect to PowerBI.
:param timeout: The HTTP timeout being used by the `KiotaRequestAdapter`. Default is 1 week (60s * 60m * 24h * 7d).
When no timeout is specified or set to None then there is no HTTP timeout on each request.
:param proxies: A dict defining the HTTP proxies to be used (default is None).
:param api_version: The API version of the Microsoft Graph API to be used (default is v1).
You can pass an enum named APIVersion which has 2 possible members v1 and beta,
or you can pass a string as `v1.0` or `beta`.
"""
def __init__(
self,
*,
conn_id: str = PowerBIHook.default_conn_name,
timeout: float = 60 * 60 * 24 * 7,
proxies: dict | None = None,
api_version: APIVersion | str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.hook = PowerBIHook(conn_id=conn_id, proxies=proxies, api_version=api_version, timeout=timeout)
self.conn_id = conn_id
self.timeout = timeout
@property
def proxies(self) -> dict | None:
return self.hook.proxies
@property
def api_version(self) -> str | None:
return self.hook.api_version
def execute(self, context: Context):
"""List visible PowerBI Workspaces."""
self.defer(
trigger=PowerBIWorkspaceListTrigger(
conn_id=self.conn_id,
timeout=self.timeout,
proxies=self.proxies,
api_version=self.api_version,
),
method_name=self.execute_complete.__name__,
)
def execute_complete(self, context: Context, event: dict[str, str]) -> Any:
"""
Return immediately - callback for when the trigger fires.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
if event:
context["ti"].xcom_push(
key=f"{self.task_id}.powerbi_workspace_ids",
value=event["workspace_ids"],
)
if event["status"] == "error":
raise AirflowException(event["message"])
|
PowerBIWorkspaceListOperator
|
python
|
python-markdown__markdown
|
markdown/extensions/codehilite.py
|
{
"start": 9778,
"end": 11192
}
|
class ____(Treeprocessor):
""" Highlight source code in code blocks. """
config: dict[str, Any]
def code_unescape(self, text: str) -> str:
"""Unescape code."""
text = text.replace("<", "<")
text = text.replace(">", ">")
# Escaped '&' should be replaced at the end to avoid
# conflicting with < and >.
text = text.replace("&", "&")
return text
def run(self, root: etree.Element) -> None:
""" Find code blocks and store in `htmlStash`. """
blocks = root.iter('pre')
for block in blocks:
if len(block) == 1 and block[0].tag == 'code':
local_config = self.config.copy()
text = block[0].text
if text is None:
continue
code = CodeHilite(
self.code_unescape(text),
tab_length=self.md.tab_length,
style=local_config.pop('pygments_style', 'default'),
**local_config
)
placeholder = self.md.htmlStash.store(code.hilite())
# Clear code block in `etree` instance
block.clear()
# Change to `p` element which will later
# be removed when inserting raw html
block.tag = 'p'
block.text = placeholder
|
HiliteTreeprocessor
|
python
|
openai__openai-python
|
src/openai/types/chat/chat_completion_token_logprob.py
|
{
"start": 867,
"end": 1769
}
|
class ____(BaseModel):
token: str
"""The token."""
bytes: Optional[List[int]] = None
"""A list of integers representing the UTF-8 bytes representation of the token.
Useful in instances where characters are represented by multiple tokens and
their byte representations must be combined to generate the correct text
representation. Can be `null` if there is no bytes representation for the token.
"""
logprob: float
"""The log probability of this token, if it is within the top 20 most likely
tokens.
Otherwise, the value `-9999.0` is used to signify that the token is very
unlikely.
"""
top_logprobs: List[TopLogprob]
"""List of the most likely tokens and their log probability, at this token
position.
In rare cases, there may be fewer than the number of requested `top_logprobs`
returned.
"""
|
ChatCompletionTokenLogprob
|
python
|
pyparsing__pyparsing
|
pyparsing/core.py
|
{
"start": 198962,
"end": 202228
}
|
class ____(ParseElementEnhance):
"""Lookbehind matching of the given parse expression.
``PrecededBy`` does not advance the parsing position within the
input string, it only verifies that the specified parse expression
matches prior to the current position. ``PrecededBy`` always
returns a null token list, but if a results name is defined on the
given expression, it is returned.
Parameters:
- ``expr`` - expression that must match prior to the current parse
location
- ``retreat`` - (default= ``None``) - (int) maximum number of characters
to lookbehind prior to the current parse location
If the lookbehind expression is a string, :class:`Literal`,
:class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn`
with a specified exact or maximum length, then the retreat
parameter is not required. Otherwise, retreat must be specified to
give a maximum number of characters to look back from
the current parse position for a lookbehind match.
Example:
.. testcode::
# VB-style variable names with type prefixes
int_var = PrecededBy("#") + pyparsing_common.identifier
str_var = PrecededBy("$") + pyparsing_common.identifier
"""
def __init__(self, expr: Union[ParserElement, str], retreat: int = 0) -> None:
super().__init__(expr)
self.expr = self.expr().leave_whitespace()
self._may_return_empty = True
self.mayIndexError = False
self.exact = False
if isinstance(expr, str_type):
expr = typing.cast(str, expr)
retreat = len(expr)
self.exact = True
elif isinstance(expr, (Literal, Keyword)):
retreat = expr.matchLen
self.exact = True
elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:
retreat = expr.maxLen
self.exact = True
elif isinstance(expr, PositionToken):
retreat = 0
self.exact = True
self.retreat = retreat
self.errmsg = f"not preceded by {expr}"
self.skipWhitespace = False
self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None)))
def parseImpl(self, instring, loc=0, do_actions=True) -> ParseImplReturnType:
if self.exact:
if loc < self.retreat:
raise ParseException(instring, loc, self.errmsg, self)
start = loc - self.retreat
_, ret = self.expr._parse(instring, start)
return loc, ret
# retreat specified a maximum lookbehind window, iterate
test_expr = self.expr + StringEnd()
instring_slice = instring[max(0, loc - self.retreat) : loc]
last_expr: ParseBaseException = ParseException(instring, loc, self.errmsg, self)
for offset in range(1, min(loc, self.retreat + 1) + 1):
try:
# print('trying', offset, instring_slice, repr(instring_slice[loc - offset:]))
_, ret = test_expr._parse(instring_slice, len(instring_slice) - offset)
except ParseBaseException as pbe:
last_expr = pbe
else:
break
else:
raise last_expr
return loc, ret
|
PrecededBy
|
python
|
run-llama__llama_index
|
llama-index-integrations/embeddings/llama-index-embeddings-ollama/llama_index/embeddings/ollama/base.py
|
{
"start": 355,
"end": 5083
}
|
class ____(BaseEmbedding):
"""Class for Ollama embeddings."""
base_url: str = Field(description="Base url the model is hosted by Ollama")
model_name: str = Field(description="The Ollama model to use.")
embed_batch_size: int = Field(
default=DEFAULT_EMBED_BATCH_SIZE,
description="The batch size for embedding calls.",
gt=0,
le=2048,
)
ollama_additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the Ollama API."
)
query_instruction: Optional[str] = Field(
default=None, description="Instruction to prepend to query text."
)
text_instruction: Optional[str] = Field(
default=None, description="Instruction to prepend to text."
)
_client: Client = PrivateAttr()
_async_client: AsyncClient = PrivateAttr()
def __init__(
self,
model_name: str,
base_url: str = "http://localhost:11434",
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
ollama_additional_kwargs: Optional[Dict[str, Any]] = None,
query_instruction: Optional[str] = None,
text_instruction: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
client_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
super().__init__(
model_name=model_name,
base_url=base_url,
embed_batch_size=embed_batch_size,
ollama_additional_kwargs=ollama_additional_kwargs or {},
query_instruction=query_instruction,
text_instruction=text_instruction,
callback_manager=callback_manager,
**kwargs,
)
client_kwargs = client_kwargs or {}
self._client = Client(host=self.base_url, **client_kwargs)
self._async_client = AsyncClient(host=self.base_url, **client_kwargs)
@classmethod
def class_name(cls) -> str:
return "OllamaEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
formatted_query = self._format_query(query)
return self.get_general_text_embedding(formatted_query)
async def _aget_query_embedding(self, query: str) -> List[float]:
"""The asynchronous version of _get_query_embedding."""
formatted_query = self._format_query(query)
return await self.aget_general_text_embedding(formatted_query)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
formatted_text = self._format_text(text)
return self.get_general_text_embedding(formatted_text)
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get text embedding."""
formatted_text = self._format_text(text)
return await self.aget_general_text_embedding(formatted_text)
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
embeddings_list: List[List[float]] = []
for text in texts:
formatted_text = self._format_text(text)
embeddings = self.get_general_text_embedding(formatted_text)
embeddings_list.append(embeddings)
return embeddings_list
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Asynchronously get text embeddings."""
formatted_texts = [self._format_text(text) for text in texts]
return await asyncio.gather(
*[self.aget_general_text_embedding(text) for text in formatted_texts]
)
def get_general_text_embedding(self, texts: str) -> List[float]:
"""Get Ollama embedding."""
result = self._client.embed(
model=self.model_name, input=texts, options=self.ollama_additional_kwargs
)
return result.embeddings[0]
async def aget_general_text_embedding(self, prompt: str) -> List[float]:
"""Asynchronously get Ollama embedding."""
result = await self._async_client.embed(
model=self.model_name, input=prompt, options=self.ollama_additional_kwargs
)
return result.embeddings[0]
def _format_query(self, query: str) -> str:
"""Format query with instruction if provided."""
if self.query_instruction:
return f"{self.query_instruction.strip()} {query.strip()}".strip()
return query.strip()
def _format_text(self, text: str) -> str:
"""Format text with instruction if provided."""
if self.text_instruction:
return f"{self.text_instruction.strip()} {text.strip()}".strip()
return text.strip()
|
OllamaEmbedding
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 106920,
"end": 107269
}
|
class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("user_id", "client_mutation_id")
user_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="userId")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
|
FollowUserInput
|
python
|
realpython__materials
|
python-mixins/utils.py
|
{
"start": 35,
"end": 290
}
|
class ____:
def __setitem__(self, key, value):
super().__setitem__(key, value)
print(f"Item set: {key=!r}, {value=!r}")
def __delitem__(self, key):
super().__delitem__(key)
print(f"Item deleted: {key=!r}")
|
DebugMixin
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/paramSpec14.py
|
{
"start": 396,
"end": 723
}
|
class ____:
@deco
@classmethod
def identity_cls(cls, val: float) -> float:
return val
@deco
@staticmethod
def identity_static(val: float) -> float:
return val
reveal_type(ClassA.identity_cls(1.2), expected_text="int")
reveal_type(ClassA.identity_static(1.2), expected_text="int")
|
ClassA
|
python
|
PyCQA__pylint
|
pylint/pyreverse/diagrams.py
|
{
"start": 10104,
"end": 13099
}
|
class ____(ClassDiagram):
"""Package diagram handling."""
TYPE = "package"
def modules(self) -> list[PackageEntity]:
"""Return all module nodes in the diagram."""
return [o for o in self.objects if isinstance(o, PackageEntity)]
def module(self, name: str) -> PackageEntity:
"""Return a module by its name, raise KeyError if not found."""
for mod in self.modules():
if mod.node.name == name:
return mod
raise KeyError(name)
def add_object(self, title: str, node: nodes.Module) -> None:
"""Create a diagram object."""
assert node not in self._nodes
ent = PackageEntity(title, node)
self._nodes[node] = ent
self.objects.append(ent)
def get_module(self, name: str, node: nodes.Module) -> PackageEntity:
"""Return a module by its name, looking also for relative imports;
raise KeyError if not found.
"""
for mod in self.modules():
mod_name = mod.node.name
if mod_name == name:
return mod
# search for fullname of relative import modules
package = node.root().name
if mod_name == f"{package}.{name}":
return mod
if mod_name == f"{package.rsplit('.', 1)[0]}.{name}":
return mod
raise KeyError(name)
def add_from_depend(self, node: nodes.ImportFrom, from_module: str) -> None:
"""Add dependencies created by from-imports."""
mod_name = node.root().name
package = self.module(mod_name).node
if from_module in package.depends:
return
if not in_type_checking_block(node):
package.depends.append(from_module)
elif from_module not in package.type_depends:
package.type_depends.append(from_module)
def extract_relationships(self) -> None:
"""Extract relationships between nodes in the diagram."""
super().extract_relationships()
for class_obj in self.classes():
# ownership
try:
mod = self.object_from_node(class_obj.node.root())
self.add_relationship(class_obj, mod, "ownership")
except KeyError:
continue
for package_obj in self.modules():
package_obj.shape = "package"
# dependencies
for dep_name in package_obj.node.depends:
try:
dep = self.get_module(dep_name, package_obj.node)
except KeyError:
continue
self.add_relationship(package_obj, dep, "depends")
for dep_name in package_obj.node.type_depends:
try:
dep = self.get_module(dep_name, package_obj.node)
except KeyError: # pragma: no cover
continue
self.add_relationship(package_obj, dep, "type_depends")
|
PackageDiagram
|
python
|
encode__django-rest-framework
|
tests/test_renderers.py
|
{
"start": 4584,
"end": 10571
}
|
class ____(TestCase):
"""
End-to-end testing of renderers using an RendererMixin on a generic view.
"""
def test_default_renderer_serializes_content(self):
"""If the Accept header is not set the default renderer should serialize the response."""
resp = self.client.get('/')
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_head_method_serializes_no_content(self):
"""No response must be included in HEAD requests."""
resp = self.client.head('/')
self.assertEqual(resp.status_code, DUMMYSTATUS)
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, b'')
def test_default_renderer_serializes_content_on_accept_any(self):
"""If the Accept header is set to */* the default renderer should serialize the response."""
resp = self.client.get('/', HTTP_ACCEPT='*/*')
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_default_case(self):
"""If the Accept header is set the specified renderer should serialize the response.
(In this case we check that works for the default renderer)"""
resp = self.client.get('/', HTTP_ACCEPT=RendererA.media_type)
self.assertEqual(resp['Content-Type'], RendererA.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_non_default_case(self):
"""If the Accept header is set the specified renderer should serialize the response.
(In this case we check that works for a non-default renderer)"""
resp = self.client.get('/', HTTP_ACCEPT=RendererB.media_type)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_unsatisfiable_accept_header_on_request_returns_406_status(self):
"""If the Accept header is unsatisfiable we should return a 406 Not Acceptable response."""
resp = self.client.get('/', HTTP_ACCEPT='foo/bar')
self.assertEqual(resp.status_code, status.HTTP_406_NOT_ACCEPTABLE)
def test_specified_renderer_serializes_content_on_format_query(self):
"""If a 'format' query is specified, the renderer with the matching
format attribute should serialize the response."""
param = '?%s=%s' % (
api_settings.URL_FORMAT_OVERRIDE,
RendererB.format
)
resp = self.client.get('/' + param)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_serializes_content_on_format_kwargs(self):
"""If a 'format' keyword arg is specified, the renderer with the matching
format attribute should serialize the response."""
resp = self.client.get('/something.formatb')
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_specified_renderer_is_used_on_format_query_with_matching_accept(self):
"""If both a 'format' query and a matching Accept header specified,
the renderer with the matching format attribute should serialize the response."""
param = '?%s=%s' % (
api_settings.URL_FORMAT_OVERRIDE,
RendererB.format
)
resp = self.client.get('/' + param,
HTTP_ACCEPT=RendererB.media_type)
self.assertEqual(resp['Content-Type'], RendererB.media_type + '; charset=utf-8')
self.assertEqual(resp.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
self.assertEqual(resp.status_code, DUMMYSTATUS)
def test_parse_error_renderers_browsable_api(self):
"""Invalid data should still render the browsable API correctly."""
resp = self.client.post('/parseerror', data='foobar', content_type='application/json', HTTP_ACCEPT='text/html')
self.assertEqual(resp['Content-Type'], 'text/html; charset=utf-8')
self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
def test_204_no_content_responses_have_no_content_type_set(self):
"""
Regression test for #1196
https://github.com/encode/django-rest-framework/issues/1196
"""
resp = self.client.get('/empty')
self.assertEqual(resp.get('Content-Type', None), None)
self.assertEqual(resp.status_code, status.HTTP_204_NO_CONTENT)
def test_contains_headers_of_api_response(self):
"""
Issue #1437
Test we display the headers of the API response and not those from the
HTML response
"""
resp = self.client.get('/html1')
self.assertContains(resp, '>GET, HEAD, OPTIONS<')
self.assertContains(resp, '>application/json<')
self.assertNotContains(resp, '>text/html; charset=utf-8<')
_flat_repr = '{"foo":["bar","baz"]}'
_indented_repr = '{\n "foo": [\n "bar",\n "baz"\n ]\n}'
def strip_trailing_whitespace(content):
"""
Seems to be some inconsistencies re. trailing whitespace with
different versions of the json lib.
"""
return re.sub(' +\n', '\n', content)
|
RendererEndToEndTests
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/enum1.py
|
{
"start": 8165,
"end": 8264
}
|
class ____(TestEnum22Base):
A = 1
reveal_type(TestEnum22.A.value, expected_text="str")
|
TestEnum22
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/exporters/CSVExporter.py
|
{
"start": 264,
"end": 4580
}
|
class ____(Exporter):
Name = "CSV of original plot data"
windows = []
def __init__(self, item):
Exporter.__init__(self, item)
self.params = Parameter.create(name='params', type='group', children=[
{
'name': 'separator',
'title': translate("Exporter", 'separator'),
'type': 'list',
'value': 'comma',
'limits': ['comma', 'tab']
},
{
'name': 'precision',
'title': translate("Exporter", 'precision'),
'type': 'int',
'value': 10,
'limits': [0, None]
},
{
'name': 'columnMode',
'title': translate("Exporter", 'columnMode'),
'type': 'list',
'limits': ['(x,y) per plot', '(x,y,y,y) for all plots'],
'value': '(x,y) per plot',
}
])
self.index_counter = itertools.count(start=0)
self.header = []
self.data = []
def parameters(self):
return self.params
def _exportErrorBarItem(self, errorBarItem: ErrorBarItem) -> None:
error_data = []
index = next(self.index_counter)
# make sure the plot actually has data:
if errorBarItem.opts['x'] is None or errorBarItem.opts['y'] is None:
return None
header_naming_map = {
"left": "x_min_error",
"right": "x_max_error",
"bottom": "y_min_error",
"top": "y_max_error"
}
# grab the base-points
self.header.extend([f'x{index:04}_error', f'y{index:04}_error'])
error_data.extend([errorBarItem.opts['x'], errorBarItem.opts['y']])
# grab the error bars
for error_direction, header_label in header_naming_map.items():
if (error := errorBarItem.opts[error_direction]) is not None:
self.header.extend([f'{header_label}_{index:04}'])
error_data.append(error)
self.data.append(tuple(error_data))
return None
def _exportPlotDataItem(self, plotDataItem) -> None:
cd = plotDataItem.getOriginalDataset()
if cd[0] is None:
# no data found, break out...
return None
index = next(self.index_counter)
if plotDataItem.name() is not None:
name = plotDataItem.name().replace('"', '""') + '_'
xName = f"{name}x"
yName = f"{name}y"
else:
xName = f'x{index:04}'
yName = f'y{index:04}'
appendAllX = self.params['columnMode'] == '(x,y) per plot'
if appendAllX or index == 0:
self.header.extend([xName, yName])
self.data.append(cd)
else:
self.header.extend([yName])
self.data.append([cd[1]])
return None
def export(self, fileName=None):
if not isinstance(self.item, PlotItem):
raise TypeError("Must have a PlotItem selected for CSV export.")
if fileName is None:
self.fileSaveDialog(filter=["*.csv", "*.tsv"])
return
for item in self.item.items:
if isinstance(item, ErrorBarItem):
self._exportErrorBarItem(item)
elif hasattr(item, 'implements') and item.implements('plotData'):
self._exportPlotDataItem(item)
sep = "," if self.params['separator'] == 'comma' else "\t"
# we want to flatten the nested arrays of data into columns
columns = [column for dataset in self.data for column in dataset]
with open(fileName, 'w', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=sep, quoting=csv.QUOTE_MINIMAL)
writer.writerow(self.header)
for row in itertools.zip_longest(*columns, fillvalue=""):
row_to_write = [
item if isinstance(item, str)
else np.format_float_positional(
item, precision=self.params['precision']
)
for item in row
]
writer.writerow(row_to_write)
self.header.clear()
self.data.clear()
CSVExporter.register()
|
CSVExporter
|
python
|
numba__numba
|
numba/cuda/tests/cudapy/test_idiv.py
|
{
"start": 129,
"end": 1056
}
|
class ____(CUDATestCase):
def test_inplace_div(self):
@cuda.jit(void(float32[:, :], int32, int32))
def div(grid, l_x, l_y):
for x in range(l_x):
for y in range(l_y):
grid[x, y] /= 2.0
x = np.ones((2, 2), dtype=np.float32)
grid = cuda.to_device(x)
div[1, 1](grid, 2, 2)
y = grid.copy_to_host()
self.assertTrue(np.all(y == 0.5))
def test_inplace_div_double(self):
@cuda.jit(void(float64[:, :], int32, int32))
def div_double(grid, l_x, l_y):
for x in range(l_x):
for y in range(l_y):
grid[x, y] /= 2.0
x = np.ones((2, 2), dtype=np.float64)
grid = cuda.to_device(x)
div_double[1, 1](grid, 2, 2)
y = grid.copy_to_host()
self.assertTrue(np.all(y == 0.5))
if __name__ == '__main__':
unittest.main()
|
TestCudaIDiv
|
python
|
Lightning-AI__lightning
|
examples/pytorch/domain_templates/semantic_segmentation.py
|
{
"start": 1783,
"end": 5595
}
|
class ____(Dataset):
"""Class for KITTI Semantic Segmentation Benchmark dataset.
Dataset link - http://www.cvlibs.net/datasets/kitti/eval_semseg.php?benchmark=semantics2015
There are 34 classes in the given labels. However, not all of them are useful for training
(like railings on highways, road dividers, etc.).
So, these useless classes (the pixel values of these classes) are stored in the `void_labels`.
The useful classes are stored in the `valid_labels`.
The `encode_segmap` function sets all pixels with any of the `void_labels` to `ignore_index`
(250 by default). It also sets all of the valid pixels to the appropriate value between 0 and
`len(valid_labels)` (since that is the number of valid classes), so it can be used properly by
the loss function when comparing with the output.
The `get_filenames` function retrieves the filenames of all images in the given `path` and
saves the absolute path in a list.
In the `get_item` function, images and masks are resized to the given `img_size`, masks are
encoded using `encode_segmap`, and given `transform` (if any) are applied to the image only
(mask does not usually require transforms, but they can be implemented in a similar way).
"""
IMAGE_PATH = os.path.join("training", "image_2")
MASK_PATH = os.path.join("training", "semantic")
def __init__(
self,
data_path: str,
split: str,
img_size: tuple = (1242, 376),
void_labels: list = DEFAULT_VOID_LABELS,
valid_labels: list = DEFAULT_VALID_LABELS,
transform=None,
):
self.img_size = img_size
self.void_labels = void_labels
self.valid_labels = valid_labels
self.ignore_index = 250
self.class_map = dict(zip(self.valid_labels, range(len(self.valid_labels))))
self.transform = transform
self.split = split
self.data_path = data_path
self.img_path = os.path.join(self.data_path, self.IMAGE_PATH)
self.mask_path = os.path.join(self.data_path, self.MASK_PATH)
self.img_list = self.get_filenames(self.img_path)
self.mask_list = self.get_filenames(self.mask_path)
# Split between train and valid set (80/20)
random_inst = random.Random(12345) # for repeatability
n_items = len(self.img_list)
idxs = random_inst.sample(range(n_items), n_items // 5)
if self.split == "train":
idxs = [idx for idx in range(n_items) if idx not in idxs]
self.img_list = [self.img_list[i] for i in idxs]
self.mask_list = [self.mask_list[i] for i in idxs]
def __len__(self):
return len(self.img_list)
def __getitem__(self, idx):
img = Image.open(self.img_list[idx])
img = img.resize(self.img_size)
img = torch.tensor(img)
mask = Image.open(self.mask_list[idx]).convert("L")
mask = mask.resize(self.img_size)
mask = torch.tensor(mask)
mask = self.encode_segmap(mask)
if self.transform:
img = self.transform(img)
return img, mask
def encode_segmap(self, mask):
"""Sets void classes to zero so they won't be considered for training."""
for voidc in self.void_labels:
mask[mask == voidc] = self.ignore_index
for validc in self.valid_labels:
mask[mask == validc] = self.class_map[validc]
# remove extra idxs from updated dataset
mask[mask > 18] = self.ignore_index
return mask
def get_filenames(self, path):
"""Returns a list of absolute paths to images inside given `path`"""
files_list = []
for filename in os.listdir(path):
files_list.append(os.path.join(path, filename))
return files_list
|
KITTI
|
python
|
networkx__networkx
|
networkx/classes/tests/dispatch_interface.py
|
{
"start": 873,
"end": 954
}
|
class ____(MultiGraph):
__networkx_backend__ = "nx_loopback"
|
LoopbackMultiGraph
|
python
|
huggingface__transformers
|
src/transformers/models/swin/modeling_swin.py
|
{
"start": 1539,
"end": 2487
}
|
class ____(ModelOutput):
r"""
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(
custom_intro="""
Swin model's outputs that also contains a pooling of the last hidden states.
"""
)
|
SwinEncoderOutput
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.