language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | openai__openai-python | src/openai/types/realtime/audio_transcription.py | {
"start": 223,
"end": 1332
} | class ____(BaseModel):
language: Optional[str] = None
"""The language of the input audio.
Supplying the input language in
[ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
format will improve accuracy and latency.
"""
model: Optional[
Literal["whisper-1", "gpt-4o-mini-transcribe", "gpt-4o-transcribe", "gpt-4o-transcribe-diarize"]
] = None
"""The model to use for transcription.
Current options are `whisper-1`, `gpt-4o-mini-transcribe`, `gpt-4o-transcribe`,
and `gpt-4o-transcribe-diarize`. Use `gpt-4o-transcribe-diarize` when you need
diarization with speaker labels.
"""
prompt: Optional[str] = None
"""
An optional text to guide the model's style or continue a previous audio
segment. For `whisper-1`, the
[prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
For `gpt-4o-transcribe` models (excluding `gpt-4o-transcribe-diarize`), the
prompt is a free text string, for example "expect words related to technology".
"""
| AudioTranscription |
python | pytorch__pytorch | torch/_inductor/runtime/triton_heuristics.py | {
"start": 143412,
"end": 143860
} | class ____(ComboKernelGrid):
def combo_x_grid(
self,
xnumels: list[int | str],
no_x_dims: list[bool],
meta: dict[str, int],
) -> str | int:
assert len(xnumels) == len(no_x_dims)
return self.summation(
[
self.ceildiv(x, 1 if no_x_dim else meta.get("XBLOCK"))
for x, no_x_dim in zip(xnumels, no_x_dims)
]
)
| SequentialComboKernelGrid |
python | doocs__leetcode | solution/3300-3399/3320.Count The Number of Winning Sequences/Solution.py | {
"start": 0,
"end": 803
} | class ____:
def countWinningSequences(self, s: str) -> int:
def calc(x: int, y: int) -> int:
if x == y:
return 0
if x < y:
return 1 if x == 0 and y == 2 else -1
return -1 if x == 2 and y == 0 else 1
@cache
def dfs(i: int, j: int, k: int) -> int:
if len(s) - i <= j:
return 0
if i >= len(s):
return int(j < 0)
res = 0
for l in range(3):
if l == k:
continue
res = (res + dfs(i + 1, j + calc(d[s[i]], l), l)) % mod
return res
mod = 10**9 + 7
d = {"F": 0, "W": 1, "E": 2}
ans = dfs(0, 0, -1)
dfs.cache_clear()
return ans
| Solution |
python | eth-brownie__brownie | brownie/typing.py | {
"start": 1791,
"end": 1876
} | class ____(TypedDict):
statements: StatementMap
branches: BranchMap
| CoverageMap |
python | keras-team__keras | keras/src/backend/common/backend_utils_test.py | {
"start": 6647,
"end": 8883
} | class ____(test_case.TestCase):
def test_valid_padding_without_output_padding(self):
"""Test computation with 'valid' padding and no output padding."""
output_shape = _get_output_shape_given_tf_padding(
input_size=5,
kernel_size=3,
strides=2,
padding="valid",
output_padding=None,
dilation_rate=1,
)
self.assertEqual(output_shape, 11)
def test_same_padding_without_output_padding(self):
"""Test computation with 'same' padding and no output padding."""
output_shape = _get_output_shape_given_tf_padding(
input_size=5,
kernel_size=3,
strides=2,
padding="same",
output_padding=None,
dilation_rate=1,
)
self.assertEqual(output_shape, 10)
def test_valid_padding_with_output_padding(self):
"""Test computation with 'valid' padding and output padding."""
output_shape = _get_output_shape_given_tf_padding(
input_size=5,
kernel_size=3,
strides=2,
padding="valid",
output_padding=1,
dilation_rate=1,
)
self.assertEqual(output_shape, 12)
def test_warning_for_inconsistencies(self):
"""Test that a warning is raised for potential inconsistencies"""
with self.assertWarns(Warning):
_convert_conv_transpose_padding_args_from_keras_to_torch(
kernel_size=3,
stride=2,
dilation_rate=1,
padding="same",
output_padding=1,
)
def test_same_padding_without_output_padding_for_torch_(self):
"""Test conversion with 'same' padding and no output padding."""
(
torch_padding,
torch_output_padding,
) = _convert_conv_transpose_padding_args_from_keras_to_torch(
kernel_size=3,
stride=2,
dilation_rate=1,
padding="same",
output_padding=None,
)
self.assertEqual(torch_padding, max(-((3 % 2 - 3) // 2), 0))
self.assertEqual(torch_output_padding, 1)
| GetOutputShapeGivenTFPaddingTest |
python | astropy__astropy | astropy/modeling/optimizers.py | {
"start": 2195,
"end": 5047
} | class ____(Optimization):
"""
Sequential Least Squares Programming optimization algorithm.
The algorithm is described in [1]_. It supports tied and fixed
parameters, as well as bounded constraints. Uses
`scipy.optimize.fmin_slsqp`.
References
----------
.. [1] http://www.netlib.org/toms/733
"""
supported_constraints = ["bounds", "eqcons", "ineqcons", "fixed", "tied"]
def __init__(self):
from scipy.optimize import fmin_slsqp
self._init_opt_method(fmin_slsqp)
self.fit_info = {
"final_func_val": None,
"numiter": None,
"exit_mode": None,
"message": None,
}
def __call__(self, objfunc, initval, fargs, **kwargs):
"""
Run the solver.
Parameters
----------
objfunc : callable
objection function
initval : iterable
initial guess for the parameter values
fargs : tuple
other arguments to be passed to the statistic function
kwargs : dict
other keyword arguments to be passed to the solver
"""
kwargs["iter"] = kwargs.pop("maxiter", self._maxiter)
if "epsilon" not in kwargs:
kwargs["epsilon"] = self._eps
if "acc" not in kwargs:
kwargs["acc"] = self._acc
# Get the verbosity level
disp = kwargs.pop("verblevel", None)
# set the values of constraints to match the requirements of fmin_slsqp
model = fargs[0]
pars = [getattr(model, name) for name in model.param_names]
bounds = [par.bounds for par in pars if not (par.fixed or par.tied)]
bounds = np.asarray(bounds)
for i in bounds:
if i[0] is None:
i[0] = DEFAULT_BOUNDS[0]
if i[1] is None:
i[1] = DEFAULT_BOUNDS[1]
# older versions of scipy require this array to be float
bounds = np.asarray(bounds, dtype=float)
eqcons = np.array(model.eqcons)
ineqcons = np.array(model.ineqcons)
fitparams, final_func_val, numiter, exit_mode, mess = self.opt_method(
objfunc,
initval,
args=fargs,
full_output=True,
disp=disp,
bounds=bounds,
eqcons=eqcons,
ieqcons=ineqcons,
**kwargs,
)
self.fit_info["final_func_val"] = final_func_val
self.fit_info["numiter"] = numiter
self.fit_info["exit_mode"] = exit_mode
self.fit_info["message"] = mess
if exit_mode != 0:
warnings.warn(
"The fit may be unsuccessful; check "
"fit_info['message'] for more information.",
AstropyUserWarning,
)
return fitparams, self.fit_info
| SLSQP |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/test_organization_detector_workflow_index.py | {
"start": 275,
"end": 1918
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-detector-workflow-index"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.unconnected_workflow = self.create_workflow(organization_id=self.organization.id)
self.unconnected_detector = self.create_detector()
self.workflow_1 = self.create_workflow(organization_id=self.organization.id)
self.workflow_2 = self.create_workflow(organization_id=self.organization.id)
self.detector_1 = self.create_detector()
self.detector_2 = self.create_detector()
self.detector_1_workflow_1 = self.create_detector_workflow(
detector=self.detector_1, workflow=self.workflow_1
)
self.detector_1_workflow_2 = self.create_detector_workflow(
detector=self.detector_1, workflow=self.workflow_2
)
self.detector_2_workflow_1 = self.create_detector_workflow(
detector=self.detector_2, workflow=self.workflow_1
)
self.team_admin_user = self.create_user()
self.create_member(
team_roles=[(self.team, "admin")],
user=self.team_admin_user,
role="member",
organization=self.organization,
)
self.member_user = self.create_user()
self.create_member(
team_roles=[(self.team, "contributor")],
user=self.member_user,
role="member",
organization=self.organization,
)
def tearDown(self) -> None:
return super().tearDown()
@region_silo_test
| OrganizationDetectorWorkflowAPITestCase |
python | gevent__gevent | src/greentest/3.14/test__interpreters.py | {
"start": 4749,
"end": 5481
} | class ____(TestBase):
def test_main(self):
main, *_ = _interpreters.get_main()
cur, *_ = _interpreters.get_current()
self.assertEqual(cur, main)
self.assertIsInstance(cur, int)
def test_subinterpreter(self):
main, *_ = _interpreters.get_main()
interp = _interpreters.create()
out = _run_output(interp, dedent("""
import _interpreters
cur, *_ = _interpreters.get_current()
print(cur)
assert isinstance(cur, int)
"""))
cur = int(out.strip())
_, expected = [id for id, *_ in _interpreters.list_all()]
self.assertEqual(cur, expected)
self.assertNotEqual(cur, main)
| GetCurrentTests |
python | pytorch__pytorch | torch/nn/modules/activation.py | {
"start": 22470,
"end": 23671
} | class ____(Module):
r"""Applies the Hard Shrinkage (Hardshrink) function element-wise.
Hardshrink is defined as:
.. math::
\text{HardShrink}(x) =
\begin{cases}
x, & \text{ if } x > \lambda \\
x, & \text{ if } x < -\lambda \\
0, & \text{ otherwise }
\end{cases}
Args:
lambd: the :math:`\lambda` value for the Hardshrink formulation. Default: 0.5
Shape:
- Input: :math:`(*)`, where :math:`*` means any number of dimensions.
- Output: :math:`(*)`, same shape as the input.
.. image:: ../scripts/activation_images/Hardshrink.png
Examples::
>>> m = nn.Hardshrink()
>>> input = torch.randn(2)
>>> output = m(input)
"""
__constants__ = ["lambd"]
lambd: float
def __init__(self, lambd: float = 0.5) -> None:
super().__init__()
self.lambd = lambd
def forward(self, input: Tensor) -> Tensor:
"""
Run forward pass.
"""
return F.hardshrink(input, self.lambd)
def extra_repr(self) -> str:
"""
Return the extra representation of the module.
"""
return f"{self.lambd}"
| Hardshrink |
python | great-expectations__great_expectations | great_expectations/metrics/column/null_count.py | {
"start": 182,
"end": 324
} | class ____(ColumnMetric[ColumnNullCountResult]):
"""Count of null values in a column"""
name = "column_values.null.count"
| ColumnNullCount |
python | doocs__leetcode | solution/1400-1499/1463.Cherry Pickup II/Solution2.py | {
"start": 0,
"end": 774
} | class ____:
def cherryPickup(self, grid: List[List[int]]) -> int:
m, n = len(grid), len(grid[0])
f = [[-1] * n for _ in range(n)]
g = [[-1] * n for _ in range(n)]
f[0][n - 1] = grid[0][0] + grid[0][n - 1]
for i in range(1, m):
for j1 in range(n):
for j2 in range(n):
x = grid[i][j1] + (0 if j1 == j2 else grid[i][j2])
for y1 in range(j1 - 1, j1 + 2):
for y2 in range(j2 - 1, j2 + 2):
if 0 <= y1 < n and 0 <= y2 < n and f[y1][y2] != -1:
g[j1][j2] = max(g[j1][j2], f[y1][y2] + x)
f, g = g, f
return max(f[j1][j2] for j1, j2 in product(range(n), range(n)))
| Solution |
python | python-attrs__attrs | tests/test_dunders.py | {
"start": 1076,
"end": 2830
} | class ____:
a = attr.ib(eq=True, order=str.lower)
b = attr.ib(order=True)
# HashC is hashable by explicit definition while HashCSlots is hashable
# implicitly. The "Cached" versions are the same, except with hash code
# caching enabled
HashC = simple_class(unsafe_hash=True)
HashCSlots = simple_class(unsafe_hash=None, eq=True, frozen=True, slots=True)
HashCCached = simple_class(unsafe_hash=True, cache_hash=True)
HashCSlotsCached = simple_class(
unsafe_hash=None, eq=True, frozen=True, slots=True, cache_hash=True
)
# the cached hash code is stored slightly differently in this case
# so it needs to be tested separately
HashCFrozenNotSlotsCached = simple_class(
frozen=True, slots=False, unsafe_hash=True, cache_hash=True
)
def _add_init(cls, frozen):
"""
Add a __init__ method to *cls*. If *frozen* is True, make it immutable.
This function used to be part of _make. It wasn't used anymore however
the tests for it are still useful to test the behavior of _make_init.
"""
has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False))
script, globs, annots = _make_init_script(
cls,
cls.__attrs_attrs__,
has_pre_init,
(
len(inspect.signature(cls.__attrs_pre_init__).parameters) > 1
if has_pre_init
else False
),
getattr(cls, "__attrs_post_init__", False),
frozen,
"__slots__" in cls.__dict__,
cache_hash=False,
base_attr_map={},
is_exc=False,
cls_on_setattr=None,
attrs_init=False,
)
_compile_and_eval(script, globs, filename="__init__")
cls.__init__ = globs["__init__"]
cls.__init__.__annotations__ = annots
return cls
| OrderCallableCSlots |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_graph.py | {
"start": 7530,
"end": 7947
} | class ____(graphene.ObjectType):
id = graphene.NonNull(graphene.ID)
assetKey = graphene.NonNull(GrapheneAssetKey)
latestMaterialization = graphene.Field(GrapheneMaterializationEvent)
unstartedRunIds = non_null_list(graphene.String)
inProgressRunIds = non_null_list(graphene.String)
latestRun = graphene.Field(GrapheneRun)
class Meta:
name = "AssetLatestInfo"
| GrapheneAssetLatestInfo |
python | great-expectations__great_expectations | great_expectations/metrics/query/data_source_table.py | {
"start": 211,
"end": 270
} | class ____(MetricResult[Any]): ...
| QueryDataSourceTableResult |
python | getsentry__sentry | src/sentry/tagstore/types.py | {
"start": 3099,
"end": 3605
} | class ____(TagType):
__slots__ = ("group_id", "key", "value", "times_seen", "first_seen", "last_seen")
_sort_key = "value"
def __init__(
self,
group_id: int,
key: str,
value: str | None,
times_seen: int,
first_seen,
last_seen,
):
self.group_id = group_id
self.key = key
self.value = value
self.times_seen = times_seen
self.first_seen = first_seen
self.last_seen = last_seen
| GroupTagValue |
python | joke2k__faker | faker/providers/color/hu_HU/__init__.py | {
"start": 43,
"end": 430
} | class ____(ColorProvider):
"""Implement color provider for ``hu_HU`` locale."""
safe_colors = (
"fekete",
"bordó",
"zöld",
"királykék",
"oliva",
"bíbor",
"kékeszöld",
"citromzöld",
"kék",
"ezüst",
"szürke",
"sárga",
"mályva",
"akvamarin",
"fehér",
)
| Provider |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_pretty.py | {
"start": 2884,
"end": 3374
} | class ____:
def __init__(self, content):
self.content = content
def _repr_pretty_(self, p, cycle):
if cycle:
p.text("MyList(...)")
else:
with p.group(3, "MyList(", ")"):
for i, child in enumerate(self.content):
if i:
p.text(",")
p.breakable()
else:
p.breakable("")
p.pretty(child)
| MyList |
python | django__django | tests/forms_tests/widget_tests/test_multiplehiddeninput.py | {
"start": 159,
"end": 4032
} | class ____(WidgetTest):
widget = MultipleHiddenInput()
def test_render_single(self):
self.check_html(
self.widget,
"email",
["test@example.com"],
html='<input type="hidden" name="email" value="test@example.com">',
)
def test_render_multiple(self):
self.check_html(
self.widget,
"email",
["test@example.com", "foo@example.com"],
html=(
'<input type="hidden" name="email" value="test@example.com">\n'
'<input type="hidden" name="email" value="foo@example.com">'
),
)
def test_render_attrs(self):
self.check_html(
self.widget,
"email",
["test@example.com"],
attrs={"class": "fun"},
html=(
'<input type="hidden" name="email" value="test@example.com" '
'class="fun">'
),
)
def test_render_attrs_multiple(self):
self.check_html(
self.widget,
"email",
["test@example.com", "foo@example.com"],
attrs={"class": "fun"},
html=(
'<input type="hidden" name="email" value="test@example.com" '
'class="fun">\n'
'<input type="hidden" name="email" value="foo@example.com" class="fun">'
),
)
def test_render_attrs_constructor(self):
widget = MultipleHiddenInput(attrs={"class": "fun"})
self.check_html(widget, "email", [], "")
self.check_html(
widget,
"email",
["foo@example.com"],
html=(
'<input type="hidden" class="fun" value="foo@example.com" name="email">'
),
)
self.check_html(
widget,
"email",
["foo@example.com", "test@example.com"],
html=(
'<input type="hidden" class="fun" value="foo@example.com" '
'name="email">\n'
'<input type="hidden" class="fun" value="test@example.com" '
'name="email">'
),
)
self.check_html(
widget,
"email",
["foo@example.com"],
attrs={"class": "special"},
html=(
'<input type="hidden" class="special" value="foo@example.com" '
'name="email">'
),
)
def test_render_empty(self):
self.check_html(self.widget, "email", [], "")
def test_render_none(self):
self.check_html(self.widget, "email", None, "")
def test_render_increment_id(self):
"""
Each input should get a separate ID.
"""
self.check_html(
self.widget,
"letters",
["a", "b", "c"],
attrs={"id": "hideme"},
html=(
'<input type="hidden" name="letters" value="a" id="hideme_0">\n'
'<input type="hidden" name="letters" value="b" id="hideme_1">\n'
'<input type="hidden" name="letters" value="c" id="hideme_2">'
),
)
def test_fieldset(self):
class TestForm(Form):
template_name = "forms_tests/use_fieldset.html"
composers = MultipleChoiceField(
choices=[("J", "John Lennon"), ("P", "Paul McCartney")],
widget=MultipleHiddenInput,
)
form = TestForm(MultiValueDict({"composers": ["J", "P"]}))
self.assertIs(self.widget.use_fieldset, False)
self.assertHTMLEqual(
'<input type="hidden" name="composers" value="J" id="id_composers_0">'
'<input type="hidden" name="composers" value="P" id="id_composers_1">',
form.render(),
)
| MultipleHiddenInputTest |
python | django-extensions__django-extensions | tests/management/commands/test_delete_squashed_migrations.py | {
"start": 4067,
"end": 8037
} | class ____(BaseDeleteSquashedMigrationsTestCase):
"""Tests for delete_squashed_migrations command."""
@patch(
"django_extensions.management.commands.delete_squashed_migrations.six.moves.input"
)
def test_should_delete_squashed_migrations(self, m_input):
m_input.return_value = "y"
class NameModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=50)
class Meta:
app_label = "testapp_with_appconfig"
call_command("makemigrations", "testapp_with_appconfig")
call_command("squashmigrations", "testapp_with_appconfig", "0002", "--noinput")
call_command("delete_squashed_migrations", "testapp_with_appconfig")
self.assertFalse(self.migration_exists("0001_initial.py"))
self.assertFalse(self.migration_exists("0002_namemodel.py"))
self.assertTrue(self.migration_exists("0001_squashed_0002_namemodel.py"))
def test_should_delete_squashed_migrations_if_interactive_mode_is_set_to_False(
self,
):
class NameModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=50)
class Meta:
app_label = "testapp_with_appconfig"
call_command("makemigrations", "testapp_with_appconfig")
call_command("squashmigrations", "testapp_with_appconfig", "0002", "--noinput")
call_command(
"delete_squashed_migrations", "testapp_with_appconfig", interactive=False
)
self.assertFalse(self.migration_exists("0001_initial.py"))
self.assertFalse(self.migration_exists("0002_namemodel.py"))
self.assertTrue(self.migration_exists("0001_squashed_0002_namemodel.py"))
@patch(
"django_extensions.management.commands.delete_squashed_migrations.six.moves.input"
)
def test_should_not_delete_anything(self, m_input):
m_input.return_value = None
class NameModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=50)
class Meta:
app_label = "testapp_with_appconfig"
call_command("makemigrations", "testapp_with_appconfig")
call_command("squashmigrations", "testapp_with_appconfig", "0002", "--noinput")
call_command("delete_squashed_migrations", "testapp_with_appconfig")
self.assertTrue(self.migration_exists("0001_initial.py"))
self.assertTrue(self.migration_exists("0002_namemodel.py"))
self.assertTrue(self.migration_exists("0001_squashed_0002_namemodel.py"))
def test_should_not_delete_files_for_given_squashed_migration(self):
class NameModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=50)
class Meta:
app_label = "testapp_with_appconfig"
call_command("makemigrations", "testapp_with_appconfig")
call_command("squashmigrations", "testapp_with_appconfig", "0002", "--noinput")
class FooModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=50)
class Meta:
app_label = "testapp_with_appconfig"
call_command("makemigrations", "testapp_with_appconfig")
call_command(
"delete_squashed_migrations",
"testapp_with_appconfig",
"0001_squashed_0002_namemodel",
interactive=False,
)
self.assertFalse(self.migration_exists("0001_initial.py"))
self.assertFalse(self.migration_exists("0002_namemodel.py"))
self.assertTrue(self.migration_exists("0001_squashed_0002_namemodel.py"))
self.assertTrue(self.migration_exists("0002_foomodel.py"))
| DeleteSquashedMigrationsTests |
python | django-haystack__django-haystack | test_haystack/simple_tests/search_indexes.py | {
"start": 379,
"end": 618
} | class ____(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
score = indexes.CharField(model_attr="score")
def get_model(self):
return ScoreMockModel
| SimpleMockScoreIndex |
python | pytorch__pytorch | torch/_inductor/compile_worker/subproc_pool.py | {
"start": 2710,
"end": 3152
} | class ____(Exception):
"""
Thrown when a job in a subprocess raises an Exception.
"""
def __init__(self, details: str, name: str = "<unknown>") -> None:
self.details = details
super().__init__(
f"An exception occurred in a subprocess:\n\nName={name}\n{details}"
)
def with_name(self, name: str) -> "SubprocException":
return SubprocException(self.details, name)
| SubprocException |
python | kamyu104__LeetCode-Solutions | Python/number-of-unique-categories.py | {
"start": 158,
"end": 454
} | class ____(object):
def numberOfCategories(self, n, categoryHandler):
"""
:type n: int
:type categoryHandler: CategoryHandler
:rtype: int
"""
return sum(all(not categoryHandler.haveSameCategory(j, i) for j in xrange(i)) for i in xrange(n))
| Solution |
python | pytorch__pytorch | torch/ao/pruning/_experimental/data_scheduler/base_data_scheduler.py | {
"start": 212,
"end": 7733
} | class ____:
r"""
The BaseDataScheduler is the abstract scheduler class specifically for the
BaseDataSparsifier class. This class controls a specific hyperparameter of
the sparsifier class and varies it across the training process (or across time).
Args:
data_sparsifier (instance of BaseDataSparsifier)
Implemented class data sparsifier class wherein the update_mask is implemented
schedule_param (str)
A specific hyperparameter of the passed sparsifier that needs to be scheduled/varied
last_epoch (int, default=-1)
This is specifically is passed when training needs to be resumed from a particular
point.
verbose (bool, default=False)
Verbosity of the BaseDataScheduler
The *get_hyperparam()* function needs to be implemented by the user.
"""
def __init__(
self, data_sparsifier, schedule_param: str, last_epoch=-1, verbose=False
):
# Attach sparsifier
if not isinstance(data_sparsifier, BaseDataSparsifier):
raise TypeError(
f"{type(data_sparsifier).__name__} is not an instance of torch.ao.pruning.BaseDataSparsifier"
)
self.data_sparsifier = data_sparsifier
self.schedule_param = schedule_param
# Initialize epoch and base hyper-params
self.base_param = {
name: config.get(schedule_param, None)
for name, config in self.data_sparsifier.data_groups.items()
}
self.last_epoch = last_epoch
# Following https://github.com/pytorch/pytorch/issues/20124
# We would like to ensure that `scheduler.step()` is called after
# `sparsifier.step()`
def with_counter(method):
if getattr(method, "_with_counter", False):
# `sparsifier.step()` has already been replaced, return.
return method
# Keep a weak reference to the sparsifier instance to prevent
# cyclic references.
instance_ref = weakref.ref(method.__self__)
# Get the unbound method for the same purpose.
func = method.__func__
cls = instance_ref().__class__
del method
@wraps(func)
def wrapper(*args, **kwargs):
instance = instance_ref()
instance._step_count += 1 # type: ignore[union-attr]
wrapped = func.__get__(instance, cls)
return wrapped(*args, **kwargs)
# Note that the returned function here is no longer a bound method,
# so attributes like `__func__` and `__self__` no longer exist.
wrapper._with_counter = True # type: ignore[attr-defined]
return wrapper
self.data_sparsifier.step = with_counter(self.data_sparsifier.step) # type: ignore[assignment]
self.data_sparsifier._step_count = 0 # type: ignore[attr-defined]
self._step_count: int = 0
self.verbose = verbose
# Housekeeping
self._get_sp_called_within_step: bool = False # sp -> schedule parameter
self.step()
@abc.abstractmethod
def get_schedule_param(self):
r"""
Abstract method that needs to be implemented by the child class.
The expected return type should is a dictionary of name to schedule_param value
The returned values will be updated in sparsifier when the scheduler step() function
is called.
Example:
>>> def get_schedule_param(self):
... new_param = {}
... for name in self.sparsifier.data_groups.keys():
... new_param[name] = (
... self.sparsifier.data_groups[name][self.schedule_param] * 0.5
... )
... return new_param
When the step() function is called, the value in self.sparsifier.data_groups[name][self.schedule_param]
would be halved
"""
raise NotImplementedError
def __repr__(self):
format_string = self.__class__.__name__ + " ("
format_string += "\n"
format_string += f"Data Sparsifier {self.data_sparsifier}\n"
format_string += f" {self.schedule_param}: {self.base_param}\n"
format_string += ")"
return format_string
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the sparsifier.
Note:
The scheduler class does not track the state of the data_sparsifier.
Make sure to store the state of the sparsifier before storing the
state of the scheduler
"""
return {
key: value
for key, value in self.__dict__.items()
if key != "data_sparsifier"
}
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Note:
Remember to restore the state of the data_sparsifier before the scheduler.
Args:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
self.__dict__.update(state_dict)
def get_last_param(self):
return self._last_param
def step(self):
# Raise warning if trying to call scheduler step before the sparsifier.
# https://github.com/pytorch/pytorch/issues/20124
if self._step_count == 1:
if not hasattr(self.data_sparsifier.step, "_with_counter"):
warnings.warn(
"Seems like `data_sparsifier.step()` has been overridden after sparsity scheduler "
"initialization. Please, make sure to call `data_sparsifier.step()` before "
"`scheduler.step()`.",
UserWarning,
stacklevel=2,
)
# Just check if there were two first scheduler.step() calls before sparsifier.step()
elif self.data_sparsifier._step_count < 1: # type: ignore[attr-defined]
warnings.warn(
"Detected call of `scheduler.step()` before `data_sparsifier.step()`. "
"You have to make sure you run the data_sparsifier.step() BEFORE any "
"calls to the scheduler.step().",
UserWarning,
stacklevel=2,
)
self._step_count += 1
class _enable_get_sp_call:
def __init__(self, o):
self.o = o
def __enter__(self):
self.o._get_sp_called_within_step = True
return self
def __exit__(self, type, value, traceback):
self.o._get_sp_called_within_step = False
with _enable_get_sp_call(self):
self.last_epoch += 1
updated_scheduler_params = self.get_schedule_param()
for name, param in updated_scheduler_params.items():
self.data_sparsifier.data_groups[name][self.schedule_param] = param
if self.verbose:
print(f"Adjusting {self.schedule_param} for group {name} to {param}")
self._last_param = {
name: config.get(self.schedule_param, None)
for name, config in self.data_sparsifier.data_groups.items()
}
self.data_sparsifier.enable_mask_update = True
| BaseDataScheduler |
python | run-llama__llama_index | llama-index-core/llama_index/core/base/embeddings/base.py | {
"start": 2014,
"end": 22646
} | class ____(TransformComponent, DispatcherSpanMixin):
"""Base class for embeddings."""
model_config = ConfigDict(
protected_namespaces=("pydantic_model_",), arbitrary_types_allowed=True
)
model_name: str = Field(
default="unknown", description="The name of the embedding model."
)
embed_batch_size: int = Field(
default=DEFAULT_EMBED_BATCH_SIZE,
description="The batch size for embedding calls.",
gt=0,
le=2048,
)
callback_manager: CallbackManager = Field(
default_factory=lambda: CallbackManager([]), exclude=True
)
num_workers: Optional[int] = Field(
default=None,
description="The number of workers to use for async embedding calls.",
)
# Use Any to avoid import loops
embeddings_cache: Optional[Any] = Field(
default=None,
description="Cache for the embeddings: if None, the embeddings are not cached",
)
@model_validator(mode="after")
def check_base_embeddings_class(self) -> Self:
from llama_index.core.storage.kvstore.types import BaseKVStore
if self.callback_manager is None:
self.callback_manager = CallbackManager([])
if self.embeddings_cache is not None and not isinstance(
self.embeddings_cache, BaseKVStore
):
raise TypeError("embeddings_cache must be of type BaseKVStore")
return self
@abstractmethod
def _get_query_embedding(self, query: str) -> Embedding:
"""
Embed the input query synchronously.
Subclasses should implement this method. Reference get_query_embedding's
docstring for more information.
"""
@abstractmethod
async def _aget_query_embedding(self, query: str) -> Embedding:
"""
Embed the input query asynchronously.
Subclasses should implement this method. Reference get_query_embedding's
docstring for more information.
"""
@dispatcher.span
def get_query_embedding(self, query: str) -> Embedding:
"""
Embed the input query.
When embedding a query, depending on the model, a special instruction
can be prepended to the raw query string. For example, "Represent the
question for retrieving supporting documents: ". If you're curious,
other examples of predefined instructions can be found in
embeddings/huggingface_utils.py.
"""
model_dict = self.to_dict()
model_dict.pop("api_key", None)
dispatcher.event(
EmbeddingStartEvent(
model_dict=model_dict,
)
)
with self.callback_manager.event(
CBEventType.EMBEDDING, payload={EventPayload.SERIALIZED: self.to_dict()}
) as event:
if not self.embeddings_cache:
query_embedding = self._get_query_embedding(query)
elif self.embeddings_cache is not None:
cached_emb = self.embeddings_cache.get(
key=query, collection="embeddings"
)
if cached_emb is not None:
cached_key = next(iter(cached_emb.keys()))
query_embedding = cached_emb[cached_key]
else:
query_embedding = self._get_query_embedding(query)
self.embeddings_cache.put(
key=query,
val={str(uuid.uuid4()): query_embedding},
collection="embeddings",
)
event.on_end(
payload={
EventPayload.CHUNKS: [query],
EventPayload.EMBEDDINGS: [query_embedding],
},
)
dispatcher.event(
EmbeddingEndEvent(
chunks=[query],
embeddings=[query_embedding],
)
)
return query_embedding
@dispatcher.span
async def aget_query_embedding(self, query: str) -> Embedding:
"""Get query embedding."""
model_dict = self.to_dict()
model_dict.pop("api_key", None)
dispatcher.event(
EmbeddingStartEvent(
model_dict=model_dict,
)
)
with self.callback_manager.event(
CBEventType.EMBEDDING, payload={EventPayload.SERIALIZED: self.to_dict()}
) as event:
if not self.embeddings_cache:
query_embedding = await self._aget_query_embedding(query)
elif self.embeddings_cache is not None:
cached_emb = await self.embeddings_cache.aget(
key=query, collection="embeddings"
)
if cached_emb is not None:
cached_key = next(iter(cached_emb.keys()))
query_embedding = cached_emb[cached_key]
else:
query_embedding = await self._aget_query_embedding(query)
await self.embeddings_cache.aput(
key=query,
val={str(uuid.uuid4()): query_embedding},
collection="embeddings",
)
event.on_end(
payload={
EventPayload.CHUNKS: [query],
EventPayload.EMBEDDINGS: [query_embedding],
},
)
dispatcher.event(
EmbeddingEndEvent(
chunks=[query],
embeddings=[query_embedding],
)
)
return query_embedding
def get_agg_embedding_from_queries(
self,
queries: List[str],
agg_fn: Optional[Callable[..., Embedding]] = None,
) -> Embedding:
"""Get aggregated embedding from multiple queries."""
query_embeddings = [self.get_query_embedding(query) for query in queries]
agg_fn = agg_fn or mean_agg
return agg_fn(query_embeddings)
async def aget_agg_embedding_from_queries(
self,
queries: List[str],
agg_fn: Optional[Callable[..., Embedding]] = None,
) -> Embedding:
"""Async get aggregated embedding from multiple queries."""
query_embeddings = [await self.aget_query_embedding(query) for query in queries]
agg_fn = agg_fn or mean_agg
return agg_fn(query_embeddings)
@abstractmethod
def _get_text_embedding(self, text: str) -> Embedding:
"""
Embed the input text synchronously.
Subclasses should implement this method. Reference get_text_embedding's
docstring for more information.
"""
async def _aget_text_embedding(self, text: str) -> Embedding:
"""
Embed the input text asynchronously.
Subclasses can implement this method if there is a true async
implementation. Reference get_text_embedding's docstring for more
information.
"""
# Default implementation just falls back on _get_text_embedding
return self._get_text_embedding(text)
def _get_text_embeddings(self, texts: List[str]) -> List[Embedding]:
"""
Embed the input sequence of text synchronously.
Subclasses can implement this method if batch queries are supported.
"""
# Default implementation just loops over _get_text_embedding
return [self._get_text_embedding(text) for text in texts]
async def _aget_text_embeddings(self, texts: List[str]) -> List[Embedding]:
"""
Embed the input sequence of text asynchronously.
Subclasses can implement this method if batch queries are supported.
"""
return await asyncio.gather(
*[self._aget_text_embedding(text) for text in texts]
)
def _get_text_embeddings_cached(self, texts: List[str]) -> List[Embedding]:
"""
Get text embeddings from cache. If not in cache, generate them.
"""
if self.embeddings_cache is None:
raise ValueError("embeddings_cache must be defined")
embeddings: List[Optional[Embedding]] = [None for i in range(len(texts))]
# Tuples of (index, text) to be able to keep same order of embeddings
non_cached_texts: List[Tuple[int, str]] = []
for i, txt in enumerate(texts):
cached_emb = self.embeddings_cache.get(key=txt, collection="embeddings")
if cached_emb is not None:
cached_key = next(iter(cached_emb.keys()))
embeddings[i] = cached_emb[cached_key]
else:
non_cached_texts.append((i, txt))
if len(non_cached_texts) > 0:
text_embeddings = self._get_text_embeddings(
[x[1] for x in non_cached_texts]
)
for j, text_embedding in enumerate(text_embeddings):
orig_i = non_cached_texts[j][0]
embeddings[orig_i] = text_embedding
self.embeddings_cache.put(
key=texts[orig_i],
val={str(uuid.uuid4()): text_embedding},
collection="embeddings",
)
return cast(List[Embedding], embeddings)
async def _aget_text_embeddings_cached(self, texts: List[str]) -> List[Embedding]:
"""
Asynchronously get text embeddings from cache. If not in cache, generate them.
"""
if self.embeddings_cache is None:
raise ValueError("embeddings_cache must be defined")
embeddings: List[Optional[Embedding]] = [None for i in range(len(texts))]
# Tuples of (index, text) to be able to keep same order of embeddings
non_cached_texts: List[Tuple[int, str]] = []
for i, txt in enumerate(texts):
cached_emb = await self.embeddings_cache.aget(
key=txt, collection="embeddings"
)
if cached_emb is not None:
cached_key = next(iter(cached_emb.keys()))
embeddings[i] = cached_emb[cached_key]
else:
non_cached_texts.append((i, txt))
if len(non_cached_texts) > 0:
text_embeddings = await self._aget_text_embeddings(
[x[1] for x in non_cached_texts]
)
for j, text_embedding in enumerate(text_embeddings):
orig_i = non_cached_texts[j][0]
embeddings[orig_i] = text_embedding
await self.embeddings_cache.aput(
key=texts[orig_i],
val={str(uuid.uuid4()): text_embedding},
collection="embeddings",
)
return cast(List[Embedding], embeddings)
@dispatcher.span
def get_text_embedding(self, text: str) -> Embedding:
"""
Embed the input text.
When embedding text, depending on the model, a special instruction
can be prepended to the raw text string. For example, "Represent the
document for retrieval: ". If you're curious, other examples of
predefined instructions can be found in embeddings/huggingface_utils.py.
"""
model_dict = self.to_dict()
model_dict.pop("api_key", None)
dispatcher.event(
EmbeddingStartEvent(
model_dict=model_dict,
)
)
with self.callback_manager.event(
CBEventType.EMBEDDING, payload={EventPayload.SERIALIZED: self.to_dict()}
) as event:
if not self.embeddings_cache:
text_embedding = self._get_text_embedding(text)
elif self.embeddings_cache is not None:
cached_emb = self.embeddings_cache.get(
key=text, collection="embeddings"
)
if cached_emb is not None:
cached_key = next(iter(cached_emb.keys()))
text_embedding = cached_emb[cached_key]
else:
text_embedding = self._get_text_embedding(text)
self.embeddings_cache.put(
key=text,
val={str(uuid.uuid4()): text_embedding},
collection="embeddings",
)
event.on_end(
payload={
EventPayload.CHUNKS: [text],
EventPayload.EMBEDDINGS: [text_embedding],
}
)
dispatcher.event(
EmbeddingEndEvent(
chunks=[text],
embeddings=[text_embedding],
)
)
return text_embedding
@dispatcher.span
async def aget_text_embedding(self, text: str) -> Embedding:
"""Async get text embedding."""
model_dict = self.to_dict()
model_dict.pop("api_key", None)
dispatcher.event(
EmbeddingStartEvent(
model_dict=model_dict,
)
)
with self.callback_manager.event(
CBEventType.EMBEDDING, payload={EventPayload.SERIALIZED: self.to_dict()}
) as event:
if not self.embeddings_cache:
text_embedding = await self._aget_text_embedding(text)
elif self.embeddings_cache is not None:
cached_emb = await self.embeddings_cache.aget(
key=text, collection="embeddings"
)
if cached_emb is not None:
cached_key = next(iter(cached_emb.keys()))
text_embedding = cached_emb[cached_key]
else:
text_embedding = await self._aget_text_embedding(text)
await self.embeddings_cache.aput(
key=text,
val={str(uuid.uuid4()): text_embedding},
collection="embeddings",
)
event.on_end(
payload={
EventPayload.CHUNKS: [text],
EventPayload.EMBEDDINGS: [text_embedding],
}
)
dispatcher.event(
EmbeddingEndEvent(
chunks=[text],
embeddings=[text_embedding],
)
)
return text_embedding
@dispatcher.span
def get_text_embedding_batch(
self,
texts: List[str],
show_progress: bool = False,
**kwargs: Any,
) -> List[Embedding]:
"""Get a list of text embeddings, with batching."""
cur_batch: List[str] = []
result_embeddings: List[Embedding] = []
queue_with_progress = enumerate(
get_tqdm_iterable(texts, show_progress, "Generating embeddings")
)
model_dict = self.to_dict()
model_dict.pop("api_key", None)
for idx, text in queue_with_progress:
cur_batch.append(text)
if idx == len(texts) - 1 or len(cur_batch) == self.embed_batch_size:
# flush
dispatcher.event(
EmbeddingStartEvent(
model_dict=model_dict,
)
)
with self.callback_manager.event(
CBEventType.EMBEDDING,
payload={EventPayload.SERIALIZED: self.to_dict()},
) as event:
if not self.embeddings_cache:
embeddings = self._get_text_embeddings(cur_batch)
elif self.embeddings_cache is not None:
embeddings = self._get_text_embeddings_cached(cur_batch)
result_embeddings.extend(embeddings)
event.on_end(
payload={
EventPayload.CHUNKS: cur_batch,
EventPayload.EMBEDDINGS: embeddings,
},
)
dispatcher.event(
EmbeddingEndEvent(
chunks=cur_batch,
embeddings=embeddings,
)
)
cur_batch = []
return result_embeddings
@dispatcher.span
async def aget_text_embedding_batch(
self,
texts: List[str],
show_progress: bool = False,
**kwargs: Any,
) -> List[Embedding]:
"""Asynchronously get a list of text embeddings, with batching."""
num_workers = self.num_workers
model_dict = self.to_dict()
model_dict.pop("api_key", None)
cur_batch: List[str] = []
embeddings_coroutines: List[Coroutine] = []
callback_payloads: List[Tuple[str, List[str]]] = []
# for idx, text in queue_with_progress:
for idx, text in enumerate(texts):
cur_batch.append(text)
if idx == len(texts) - 1 or len(cur_batch) == self.embed_batch_size:
# flush
dispatcher.event(
EmbeddingStartEvent(
model_dict=model_dict,
)
)
event_id = self.callback_manager.on_event_start(
CBEventType.EMBEDDING,
payload={EventPayload.SERIALIZED: self.to_dict()},
)
callback_payloads.append((event_id, cur_batch))
if not self.embeddings_cache:
embeddings_coroutines.append(self._aget_text_embeddings(cur_batch))
elif self.embeddings_cache is not None:
embeddings_coroutines.append(
self._aget_text_embeddings_cached(cur_batch)
)
cur_batch = []
# flatten the results of asyncio.gather, which is a list of embeddings lists
if len(embeddings_coroutines) > 0:
if num_workers and num_workers > 1:
nested_embeddings = await run_jobs(
embeddings_coroutines,
show_progress=show_progress,
workers=self.num_workers,
desc="Generating embeddings",
)
elif show_progress:
try:
from tqdm.asyncio import tqdm_asyncio
nested_embeddings = await tqdm_asyncio.gather(
*embeddings_coroutines,
total=len(embeddings_coroutines),
desc="Generating embeddings",
)
except ImportError:
nested_embeddings = await asyncio.gather(*embeddings_coroutines)
else:
nested_embeddings = await asyncio.gather(*embeddings_coroutines)
else:
nested_embeddings = []
result_embeddings = [
embedding for embeddings in nested_embeddings for embedding in embeddings
]
for (event_id, text_batch), embeddings in zip(
callback_payloads, nested_embeddings
):
dispatcher.event(
EmbeddingEndEvent(
chunks=text_batch,
embeddings=embeddings,
)
)
self.callback_manager.on_event_end(
CBEventType.EMBEDDING,
payload={
EventPayload.CHUNKS: text_batch,
EventPayload.EMBEDDINGS: embeddings,
},
event_id=event_id,
)
return result_embeddings
def similarity(
self,
embedding1: Embedding,
embedding2: Embedding,
mode: SimilarityMode = SimilarityMode.DEFAULT,
) -> float:
"""Get embedding similarity."""
return similarity(embedding1=embedding1, embedding2=embedding2, mode=mode)
def __call__(self, nodes: Sequence[BaseNode], **kwargs: Any) -> Sequence[BaseNode]:
embeddings = self.get_text_embedding_batch(
[node.get_content(metadata_mode=MetadataMode.EMBED) for node in nodes],
**kwargs,
)
for node, embedding in zip(nodes, embeddings):
node.embedding = embedding
return nodes
async def acall(
self, nodes: Sequence[BaseNode], **kwargs: Any
) -> Sequence[BaseNode]:
embeddings = await self.aget_text_embedding_batch(
[node.get_content(metadata_mode=MetadataMode.EMBED) for node in nodes],
**kwargs,
)
for node, embedding in zip(nodes, embeddings):
node.embedding = embedding
return nodes
| BaseEmbedding |
python | dask__dask | dask/dataframe/dask_expr/_repartition.py | {
"start": 14601,
"end": 18038
} | class ____(Repartition):
@functools.cached_property
def _size(self):
size = self.operand("partition_size")
if isinstance(size, str):
size = parse_bytes(size)
return int(size)
@functools.cached_property
def _mem_usage(self):
return _get_mem_usages(self.frame)
@functools.cached_property
def _nsplits(self):
return 1 + self._mem_usage // self._size
@functools.cached_property
def _partition_boundaries(self):
nsplits = self._nsplits
mem_usages = self._mem_usage
if np.any(nsplits > 1):
split_mem_usages = []
for n, usage in zip(nsplits, mem_usages):
split_mem_usages.extend([usage / n] * n)
mem_usages = pd.Series(split_mem_usages)
assert np.all(mem_usages <= self._size)
new_npartitions = list(map(len, iter_chunks(mem_usages, self._size)))
new_partitions_boundaries = np.cumsum(new_npartitions)
return _clean_new_division_boundaries(
new_partitions_boundaries, self.frame.npartitions
)
def _divisions(self):
if np.any(self._nsplits > 1):
return (None,) * len(self._partition_boundaries)
return (self.frame.divisions[i] for i in self._partition_boundaries)
def _lower(self):
# populate cache
self._mem_usage
return super()._lower()
def _layer(self) -> dict:
df = self.frame
dsk: dict[tuple, Any] = {}
if np.any(self._nsplits > 1):
split_name = f"split-{tokenize(df, self._nsplits)}"
new_name = f"repartition-split-{self._size}-{tokenize(df)}"
j = 0
for i, k in enumerate(self._nsplits):
if k == 1:
dsk[new_name, j] = (df._name, i)
j += 1
else:
dsk[split_name, i] = (split_evenly, (df._name, i), k)
for jj in range(k):
dsk[new_name, j] = (getitem, (split_name, i), jj)
j += 1
else:
new_name = self.frame._name
dsk.update(
{
(self._name, i): (
methods.concat,
[(new_name, j) for j in range(start, end)],
)
for i, (start, end) in enumerate(
zip(self._partition_boundaries, self._partition_boundaries[1:])
)
}
)
return dsk
def _clean_new_division_boundaries(new_partitions_boundaries, frame_npartitions):
if not isinstance(new_partitions_boundaries, list):
new_partitions_boundaries = list(new_partitions_boundaries)
if new_partitions_boundaries[0] > 0:
new_partitions_boundaries.insert(0, 0)
if new_partitions_boundaries[-1] < frame_npartitions:
new_partitions_boundaries[-1] = frame_npartitions
return new_partitions_boundaries
mem_usages_lru = LRU(10) # type: ignore[var-annotated]
def _get_mem_usages(frame):
if frame._name in mem_usages_lru:
return mem_usages_lru[frame._name]
result = _compute_mem_usages(frame)
mem_usages_lru[frame._name] = result
return result
def _compute_mem_usages(frame):
from dask.dataframe.dask_expr._collection import new_collection
return new_collection(TotalMemoryUsageFrame(frame, deep=True)).compute()
| RepartitionSize |
python | pytorch__pytorch | test/dynamo/test_backward_higher_order_ops.py | {
"start": 585,
"end": 1957
} | class ____(torch._dynamo.test_case.TestCase):
def test_invoke_in_eager(self):
x = torch.tensor([0.5, 0.5], requires_grad=True)
y = torch.tensor([0.5, 0.5], requires_grad=True)
def fn(x, y):
x.register_hook(_multiply_invoke)
return x * y
out = fn(x, y)
grad_out = torch.tensor([2.0, 2.0])
out.backward(grad_out)
self.assertEqual(x.grad, y * grad_out)
def test_invoke_in_pt2(self):
for backend in ["eager", "aot_eager", "inductor"]:
torch._dynamo.reset()
x = torch.tensor([0.5, 0.5], requires_grad=True)
y = torch.tensor([0.5, 0.5], requires_grad=True)
def fn(x, y):
x.register_hook(_multiply_invoke)
return x * y
fn = torch.compile(fn, backend=backend)
out = fn(x, y)
grad_out = torch.tensor([2.0, 2.0])
out.backward(grad_out)
self.assertEqual(x.grad, grad_out * y)
def test_invoke_make_fx_forward_contrived(self):
x = torch.tensor([0.5, 0.5], requires_grad=True)
out = make_fx(_multiply_invoke)(x)
self.assertEqual(out(x), torch.tensor([0.25, 0.25]))
actual = normalize_gm(out.print_readable(False))
self.assertExpectedInline(
actual,
"""\
| BackwardHigherOrderOpTests |
python | apache__airflow | providers/google/tests/unit/google/test_go_module.py | {
"start": 939,
"end": 1687
} | class ____:
@mock.patch("airflow.providers.google.go_module_utils._execute_in_subprocess")
def test_should_init_go_module(self, mock_execute_in_subprocess):
init_module(go_module_name="example.com/main", go_module_path="/home/example/go")
mock_execute_in_subprocess.assert_called_once_with(
["go", "mod", "init", "example.com/main"], cwd="/home/example/go"
)
@mock.patch("airflow.providers.google.go_module_utils._execute_in_subprocess")
def test_should_install_module_dependencies(self, mock_execute_in_subprocess):
install_dependencies(go_module_path="/home/example/go")
mock_execute_in_subprocess.assert_called_once_with(["go", "mod", "tidy"], cwd="/home/example/go")
| TestGoModule |
python | ray-project__ray | python/ray/autoscaler/_private/kuberay/autoscaling_config.py | {
"start": 1130,
"end": 20847
} | class ____:
"""Produces an autoscaling config by reading data from the RayCluster CR.
Used to fetch the autoscaling config at the beginning of each autoscaler iteration.
In the context of Ray deployment on Kubernetes, the autoscaling config is an
internal interface.
The autoscaling config carries the strict subset of RayCluster CR data required by
the autoscaler to make scaling decisions; in particular, the autoscaling config does
not carry pod configuration data.
This class is the only public object in this file.
"""
def __init__(self, ray_cluster_name, ray_cluster_namespace):
self.kubernetes_api_client = node_provider.KubernetesHttpApiClient(
namespace=ray_cluster_namespace
)
self._ray_cr_path = f"rayclusters/{ray_cluster_name}"
def __call__(self):
ray_cr = self._fetch_ray_cr_from_k8s_with_retries()
autoscaling_config = _derive_autoscaling_config_from_ray_cr(ray_cr)
return autoscaling_config
def _fetch_ray_cr_from_k8s_with_retries(self) -> Dict[str, Any]:
"""Fetch the RayCluster CR by querying the K8s API server.
Retry on HTTPError for robustness, in particular to protect autoscaler
initialization.
"""
for i in range(1, MAX_RAYCLUSTER_FETCH_TRIES + 1):
try:
return self.kubernetes_api_client.get(self._ray_cr_path)
except requests.HTTPError as e:
if i < MAX_RAYCLUSTER_FETCH_TRIES:
logger.exception(
"Failed to fetch RayCluster CR from K8s. Retrying."
)
time.sleep(RAYCLUSTER_FETCH_RETRY_S)
else:
raise e from None
# This branch is inaccessible. Raise to satisfy mypy.
raise AssertionError
def _derive_autoscaling_config_from_ray_cr(ray_cr: Dict[str, Any]) -> Dict[str, Any]:
provider_config = _generate_provider_config(ray_cr["metadata"]["namespace"])
available_node_types = _generate_available_node_types_from_ray_cr_spec(
ray_cr["spec"]
)
# The autoscaler expects a global max workers field. We set it to the sum of
# node type max workers.
global_max_workers = sum(
node_type["max_workers"] for node_type in available_node_types.values()
)
# Legacy autoscaling fields carry no information but are required for compatibility.
legacy_autoscaling_fields = _generate_legacy_autoscaling_config_fields()
# Process autoscaler options.
autoscaler_options = ray_cr["spec"].get(AUTOSCALER_OPTIONS_KEY, {})
if IDLE_SECONDS_KEY in autoscaler_options:
idle_timeout_minutes = autoscaler_options[IDLE_SECONDS_KEY] / 60.0
else:
idle_timeout_minutes = 1.0
if autoscaler_options.get(UPSCALING_KEY) == UPSCALING_VALUE_CONSERVATIVE:
upscaling_speed = 1 # Rate-limit upscaling if "Conservative" is set by user.
# This elif is redudant but included for clarity.
elif autoscaler_options.get(UPSCALING_KEY) == UPSCALING_VALUE_DEFAULT:
upscaling_speed = 1000 # i.e. big, no rate-limiting by default
# This elif is redudant but included for clarity.
elif autoscaler_options.get(UPSCALING_KEY) == UPSCALING_VALUE_AGGRESSIVE:
upscaling_speed = 1000
else:
upscaling_speed = 1000
autoscaling_config = {
"provider": provider_config,
"cluster_name": ray_cr["metadata"]["name"],
"head_node_type": _HEAD_GROUP_NAME,
"available_node_types": available_node_types,
"max_workers": global_max_workers,
# Should consider exposing `idleTimeoutMinutes` in the RayCluster CRD,
# under an `autoscaling` field.
"idle_timeout_minutes": idle_timeout_minutes,
# Should consider exposing `upscalingSpeed` in the RayCluster CRD,
# under an `autoscaling` field.
"upscaling_speed": upscaling_speed,
**legacy_autoscaling_fields,
}
# Make sure the config is readable by the autoscaler.
validate_config(autoscaling_config)
return autoscaling_config
def _generate_provider_config(ray_cluster_namespace: str) -> Dict[str, Any]:
"""Generates the `provider` field of the autoscaling config, which carries data
required to instantiate the KubeRay node provider.
"""
return {
"type": "kuberay",
"namespace": ray_cluster_namespace,
DISABLE_NODE_UPDATERS_KEY: True,
DISABLE_LAUNCH_CONFIG_CHECK_KEY: True,
FOREGROUND_NODE_LAUNCH_KEY: True,
WORKER_LIVENESS_CHECK_KEY: False,
}
def _generate_legacy_autoscaling_config_fields() -> Dict[str, Any]:
"""Generates legacy autoscaling config fields required for compatibiliy."""
return {
"file_mounts": {},
"cluster_synced_files": [],
"file_mounts_sync_continuously": False,
"initialization_commands": [],
"setup_commands": [],
"head_setup_commands": [],
"worker_setup_commands": [],
"head_start_ray_commands": [],
"worker_start_ray_commands": [],
"auth": {},
}
def _generate_available_node_types_from_ray_cr_spec(
ray_cr_spec: Dict[str, Any],
) -> Dict[str, Any]:
"""Formats autoscaler "available_node_types" field based on the Ray CR's group
specs.
"""
headGroupSpec = ray_cr_spec["headGroupSpec"]
return {
_HEAD_GROUP_NAME: _node_type_from_group_spec(headGroupSpec, is_head=True),
**{
worker_group_spec["groupName"]: _node_type_from_group_spec(
worker_group_spec, is_head=False
)
for worker_group_spec in ray_cr_spec["workerGroupSpecs"]
},
}
def _node_type_from_group_spec(
group_spec: Dict[str, Any], is_head: bool
) -> Dict[str, Any]:
"""Converts CR group spec to autoscaler node type."""
if is_head:
# The head node type has no workers because the head is not a worker.
min_workers = max_workers = 0
else:
# `minReplicas` and `maxReplicas` are required fields for each workerGroupSpec.
# numOfHosts specifies the number of workers per replica in KubeRay v1.1+.
min_workers = group_spec["minReplicas"] * group_spec.get("numOfHosts", 1)
max_workers = group_spec["maxReplicas"] * group_spec.get("numOfHosts", 1)
resources = _get_ray_resources_from_group_spec(group_spec, is_head)
labels = _get_labels_from_group_spec(group_spec)
node_type = {
"min_workers": min_workers,
"max_workers": max_workers,
# `node_config` is a legacy field required for compatibility.
# Pod config data is required by the operator but not by the autoscaler.
"node_config": {},
"resources": resources,
"labels": labels,
}
idle_timeout_s = group_spec.get(IDLE_SECONDS_KEY)
if idle_timeout_s is not None:
node_type["idle_timeout_s"] = float(idle_timeout_s)
return node_type
def _get_ray_resources_from_group_spec(
group_spec: Dict[str, Any], is_head: bool
) -> Dict[str, int]:
"""
Infers Ray resources from group `Resources` field, rayStartCommands, or K8s limits.
The resources extracted are used in autoscaling calculations.
"""
# Set resources from top-level group 'Resources' field if it exists.
group_resources = group_spec.get("resources", {})
ray_start_params = group_spec.get("rayStartParams", {})
# In KubeRay, Ray container is always the first application container of a Ray Pod.
k8s_resources = group_spec["template"]["spec"]["containers"][0].get("resources", {})
group_name = _HEAD_GROUP_NAME if is_head else group_spec["groupName"]
num_cpus = _get_num_cpus(
group_resources, ray_start_params, k8s_resources, group_name
)
num_gpus = _get_num_gpus(
group_resources, ray_start_params, k8s_resources, group_name
)
custom_resource_dict = _get_custom_resources(
group_resources, ray_start_params, group_name
)
num_tpus = _get_num_tpus(group_resources, custom_resource_dict, k8s_resources)
memory = _get_memory(group_resources, ray_start_params, k8s_resources)
# It's not allowed to use object store memory as a resource request, so we don't
# add that to the autoscaler's resources annotations.
resources = {}
assert isinstance(num_cpus, int)
resources["CPU"] = num_cpus
if num_gpus is not None:
resources["GPU"] = num_gpus
if num_tpus is not None:
# Add TPU Ray resource if not already added by ray_start_params,
# but specified in k8s_resource_limits.
if "TPU" not in custom_resource_dict:
resources["TPU"] = num_tpus
"""Add TPU head resource, similar to the GCP node_provider.
Sets the Ray resource TPU-{...}-head to ensure the Ray autoscaler
has sufficient resources to make scaling decisions.
TPU worker groups treat each TPU podslice as a replica, with `NumOfHosts`
specifying the number of workers per slice. Each replica of a TPU worker
group has one TPU head.
For example, a v4-16 worker group with 2 replicas should have the following
resource labels on worker 0 of each replica:
worker 0: resources = {"TPU": 4, "TPU-v4-16-head": 1}
"""
if (
"nodeSelector" in group_spec["template"]["spec"]
and GKE_TPU_TOPOLOGY_LABEL in group_spec["template"]["spec"]["nodeSelector"]
and GKE_TPU_ACCELERATOR_LABEL
in group_spec["template"]["spec"]["nodeSelector"]
):
topology = group_spec["template"]["spec"]["nodeSelector"][
GKE_TPU_TOPOLOGY_LABEL
]
accelerator = group_spec["template"]["spec"]["nodeSelector"][
GKE_TPU_ACCELERATOR_LABEL
]
accelerator_type = utils.tpu_node_selectors_to_type(topology, accelerator)
if accelerator_type:
resources[f"TPU-{accelerator_type}-head"] = 1
else:
logger.error(
f"Pods using TPUs require both `{GKE_TPU_TOPOLOGY_LABEL}` and `{GKE_TPU_ACCELERATOR_LABEL}` node selectors. "
"See https://docs.ray.io/en/latest/cluster/kubernetes/user-guides/tpu.html#configuring-ray-pods-for-tpu-usage "
"and https://cloud.google.com/kubernetes-engine/docs/how-to/tpus."
)
if memory is not None:
resources["memory"] = memory
resources.update(custom_resource_dict)
return resources
def _get_labels_from_group_spec(group_spec: Dict[str, Any]) -> Dict[str, str]:
"""
Parses Ray node labels for the autoscaling config based on the following
priority:
1. Top-level `labels` field in the group spec.
2. `labels` field in `rayStartParams`.
"""
labels_dict = {}
ray_start_params = group_spec.get("rayStartParams", {})
labels_str = ray_start_params.get("labels")
if labels_str:
logger.warning(
f"Ignoring labels: {labels_str} set in rayStartParams. Group labels are supported in the top-level Labels field starting in KubeRay v1.5"
)
# Check for top-level structured Labels field.
if "labels" in group_spec and isinstance(group_spec.get("labels"), dict):
labels_dict = group_spec.get("labels")
# Validate node labels follow expected Kubernetes label syntax.
validate_node_label_syntax(labels_dict)
return labels_dict
def _get_num_cpus(
group_resources: Dict[str, str],
ray_start_params: Dict[str, str],
k8s_resources: Dict[str, Dict[str, str]],
group_name: str,
) -> int:
"""Get CPU annotation from `resources` field, ray_start_params or k8s_resources,
with priority for `resources` field.
"""
if "CPU" in group_resources:
if "num-cpus" in ray_start_params:
logger.warning(
f"'CPU' specified in both the top-level 'resources' field and in 'rayStartParams'. "
f"Using the value from 'resources': {group_resources['CPU']}."
)
return _round_up_k8s_quantity(group_resources["CPU"])
if "num-cpus" in ray_start_params:
return int(ray_start_params["num-cpus"])
elif "cpu" in k8s_resources.get("limits", {}):
cpu_quantity: str = k8s_resources["limits"]["cpu"]
return _round_up_k8s_quantity(cpu_quantity)
elif "cpu" in k8s_resources.get("requests", {}):
cpu_quantity: str = k8s_resources["requests"]["cpu"]
return _round_up_k8s_quantity(cpu_quantity)
else:
# Getting the number of CPUs is important, so raise an error if we can't do it.
raise ValueError(
f"Autoscaler failed to detect `CPU` resources for group {group_name}."
"\nSet the `--num-cpus` rayStartParam and/or "
"the CPU resource limit for the Ray container."
)
def _get_memory(
group_resources: Dict[str, str],
ray_start_params: Dict[str, str],
k8s_resources: Dict[str, Dict[str, str]],
) -> Optional[int]:
"""Get memory resource annotation from `resources` field, ray_start_params or k8s_resources,
with priority for `resources` field.
"""
if "memory" in group_resources:
if "memory" in ray_start_params:
logger.warning(
f"'memory' specified in both the top-level 'resources' field and in 'rayStartParams'. "
f"Using the value from 'resources': {group_resources['memory']}."
)
return _round_up_k8s_quantity(group_resources["memory"])
if "memory" in ray_start_params:
return int(ray_start_params["memory"])
elif "memory" in k8s_resources.get("limits", {}):
memory_quantity: str = k8s_resources["limits"]["memory"]
return _round_up_k8s_quantity(memory_quantity)
elif "memory" in k8s_resources.get("requests", {}):
memory_quantity: str = k8s_resources["requests"]["memory"]
return _round_up_k8s_quantity(memory_quantity)
return None
def _get_num_gpus(
group_resources: Dict[str, str],
ray_start_params: Dict[str, str],
k8s_resources: Dict[str, Dict[str, str]],
group_name: str,
) -> Optional[int]:
"""Get GPU resource annotation from `resources` field, ray_start_params or k8s_resources,
with priority for `resources` field.
"""
if "GPU" in group_resources:
if "num-gpus" in ray_start_params:
logger.warning(
f"'GPU' specified in both the top-level 'resources' field and in 'rayStartParams'. "
f"Using the value from 'resources': {group_resources['GPU']}."
)
return _round_up_k8s_quantity(group_resources["GPU"])
elif "num-gpus" in ray_start_params:
return int(ray_start_params["num-gpus"])
else:
for key, resource_quantity in chain(
k8s_resources.get("limits", {}).items(),
k8s_resources.get("requests", {}).items(),
):
# e.g. nvidia.com/gpu
if key.endswith("gpu"):
# Typically, this is a string representing an interger, e.g. "1".
# Convert to int, making no assumptions on the resource_quantity,
# besides that it's valid as a K8s resource quantity.
num_gpus = _round_up_k8s_quantity(resource_quantity)
if num_gpus > 0:
# Only one GPU type supported for now, break out on first
# "/gpu" match.
return num_gpus
return None
def _get_num_tpus(
group_resources: Dict[str, str],
custom_resource_dict: Dict[str, int],
k8s_resources: Dict[str, Dict[str, str]],
) -> Optional[int]:
"""Get TPU custom resource annotation from `resources` field, custom_resource_dict in ray_start_params,
or k8s_resources, with priority for `resources` field.
"""
if "TPU" in group_resources:
return _round_up_k8s_quantity(group_resources["TPU"])
elif "TPU" in custom_resource_dict:
return custom_resource_dict["TPU"]
else:
for typ in ["limits", "requests"]:
tpu_resource_quantity = k8s_resources.get(typ, {}).get("google.com/tpu")
if tpu_resource_quantity is not None:
# Typically, this is a string representing an integer, e.g. "1".
# Convert to int, making no assumptions on the tpu_resource_quantity,
# besides that it's valid as a K8s resource quantity.
num_tpus = _round_up_k8s_quantity(tpu_resource_quantity)
if num_tpus > 0:
return num_tpus
return None
def _round_up_k8s_quantity(quantity: str) -> int:
"""Rounds a Kubernetes resource quantity up to the nearest integer.
Args:
quantity: Resource quantity as a string in the canonical K8s form.
Returns:
The quantity, rounded up, as an integer.
"""
resource_decimal: decimal.Decimal = utils.parse_quantity(quantity)
rounded = resource_decimal.to_integral_value(rounding=decimal.ROUND_UP)
return int(rounded)
def _get_custom_resources(
group_resources: Dict[str, str], ray_start_params: Dict[str, Any], group_name: str
) -> Dict[str, int]:
"""Format custom resources based on the group `resources` field or `resources` Ray start param.
Currently, the value of the rayStartParam `resources` field must
be formatted as follows:
'"{\"Custom1\": 1, \"Custom2\": 5}"'.
This method first converts the input to a correctly formatted
json string and then loads that json string to a dict.
"""
# If the top-level `resources` field is defined, use it as the exclusive source.
if group_resources:
if "resources" in ray_start_params:
logger.warning(
f"custom resources specified in both the top-level 'resources' field and in 'rayStartParams'. "
f"Using the values from 'resources': {group_resources}."
)
standard_keys = {"CPU", "GPU", "TPU", "memory"}
try:
custom_resources = {
k: _round_up_k8s_quantity(v)
for k, v in group_resources.items()
if k not in standard_keys
}
except Exception as e:
logger.error(
f"Error reading `resource` for group {group_name}."
" For the correct format, refer to example configuration at "
"https://github.com/ray-project/ray/blob/master/python/"
"ray/autoscaler/kuberay/ray-cluster.complete.yaml."
)
raise e
return custom_resources
# Otherwise, check rayStartParams.
if "resources" not in ray_start_params:
return {}
resources_string = ray_start_params["resources"]
try:
# Drop the extra pair of quotes and remove the backslash escapes.
# resources_json should be a json string.
resources_json = resources_string[1:-1].replace("\\", "")
# Load a dict from the json string.
resources = json.loads(resources_json)
assert isinstance(resources, dict)
for key, value in resources.items():
assert isinstance(key, str)
assert isinstance(value, int)
except Exception as e:
logger.error(
f"Error reading `resource` rayStartParam for group {group_name}."
" For the correct format, refer to example configuration at "
"https://github.com/ray-project/ray/blob/master/python/"
"ray/autoscaler/kuberay/ray-cluster.complete.yaml."
)
raise e
return resources
| AutoscalingConfigProducer |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/instigation.py | {
"start": 6854,
"end": 8461
} | class ____(DynamicPartitionsRequestMixin, graphene.ObjectType):
class Meta: # pyright: ignore[reportIncompatibleVariableOverride]
name = "DynamicPartitionsRequestResult"
skippedPartitionKeys = non_null_list(graphene.String)
def __init__(self, dynamic_partitions_request_result: DynamicPartitionsRequestResult):
super().__init__()
self._dynamic_partitions_request_result = dynamic_partitions_request_result
def get_dynamic_partitions_request(
self,
) -> Union[AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest]:
if self._dynamic_partitions_request_result.added_partitions is not None:
return AddDynamicPartitionsRequest(
partition_keys=self._dynamic_partitions_request_result.added_partitions,
partitions_def_name=self._dynamic_partitions_request_result.partitions_def_name,
)
elif self._dynamic_partitions_request_result.deleted_partitions is not None:
return DeleteDynamicPartitionsRequest(
partition_keys=self._dynamic_partitions_request_result.deleted_partitions,
partitions_def_name=self._dynamic_partitions_request_result.partitions_def_name,
)
else:
check.failed(
"Unexpected dynamic_partitions_request_result"
f" {self._dynamic_partitions_request_result}"
)
def resolve_skippedPartitionKeys(self, _graphene_info: ResolveInfo):
return self._dynamic_partitions_request_result.skipped_partitions
| GrapheneDynamicPartitionsRequestResult |
python | ray-project__ray | python/ray/experimental/channel/torch_tensor_type.py | {
"start": 550,
"end": 7424
} | class ____(ChannelOutputType):
AUTO = "auto"
CPU = "cpu"
ACCELERATOR = "accelerator"
def __init__(
self,
transport: Optional[Union[str, Communicator]] = AUTO,
device: Device = Device.DEFAULT,
_static_shape: bool = False,
_direct_return: Optional[bool] = False,
):
"""
A type hint that can be used to annotate DAG nodes that return a
torch.Tensor.
NOTE: Use of this type in the DAG will register a custom serializer for
torch.Tensor that moves the tensor to the correct device on the
receiver. If you are using ray.cloudpickle to serialize objects and you
do not want this behavior, deregister the custom serializer using
ray.util.serialization.deregister_serializer(torch.Tensor).
Args:
transport: "auto" (default) means that tensors will be passed via
host memory, using numpy as the serialization format. Pass
TorchTensorType.ACCELERATOR or "accelerator" to use accelerator
instead, avoiding the host memory copy.
device: Target device for tensor transport. Options:
- "default": Retains the same device type as the sender.
- "cpu": Moves tensor to CPU on the receiver. Not compatible
with accelerator transport.
- "gpu" or "cuda": Moves tensor to GPU on the receiver.
_static_shape: A hint indicating whether the shape(s) and dtype(s)
of tensor(s) contained in this value always remain the same
across different executions of the DAG.
_direct_return: Whether the tensor is sent directly or inside of
other data. If a non-default `transport` is used, this allows
the sender and receiver to eliminate performance overhead from
an additional data transfer.
NOTE: Setting static_shape=True and _direct_return=True can improve
performance if a non-default transport is used. However, if either flag
is set, then the user must ensure that the condition is met.
If using this type as a Compiled Graph annotation, an exception will
be thrown in the following cases, and the DAG will be torn down. To
continue execution, a new DAG must be created:
1. If _static_shape=True, and the found tensors don't match the
previous shape or dtype(s).
2. If _direct_return=True, and the returned value is not a
torch.Tensor.
"""
super().__init__()
self._device = device
self._static_shape = _static_shape
self._direct_return = _direct_return
self._communicator: Optional[Communicator] = None
if isinstance(transport, Communicator):
self._communicator = transport
transport = transport.get_transport_name()
if transport not in [self.AUTO, self.CPU, self.ACCELERATOR]:
raise ValueError(
"`transport` must be TorchTensorType.AUTO, TorchTensorType.ACCELERATOR "
"or TorchTensorType.CPU"
)
if device == Device.CPU and transport == self.ACCELERATOR:
raise ValueError(
"accelerator transport is not supported with CPU target device."
)
self.transport = transport
self._communicator_id: Optional[str] = None
if self._static_shape and self.transport == self.AUTO:
logger.info(
"TorchTensorType(_static_shape=True) has no effect when "
"`transport` is TorchTensorType.AUTO (default)."
)
if self._direct_return and self.transport == self.AUTO:
logger.info(
"TorchTensorType(_direct_return=True) has no effect when "
"`transport` is TorchTensorType.AUTO (default)."
)
@property
def device(self) -> Device:
return self._device
@property
def static_shape(self):
return self._static_shape
@property
def direct_return(self):
return self._direct_return
def register_custom_serializer(self) -> None:
super().register_custom_serializer()
import torch
def serialize(t):
ctx = ChannelContext.get_current()
return ctx.serialization_context.serialize_tensor(t)
def deserialize(b):
ctx = ChannelContext.get_current()
return ctx.serialization_context.deserialize_tensor(b, self.device)
ray.util.serialization.register_serializer(
torch.Tensor,
serializer=serialize,
deserializer=deserialize,
)
def create_channel(
self,
writer: Optional["ray.actor.ActorHandle"],
reader_and_node_list: List[Tuple["ray.actor.ActorHandle", str]],
driver_actor_id: Optional[str] = None,
_cpu_data_channel: Optional["Channel"] = None,
_tensor_metadata_channel: Optional["Channel"] = None,
) -> type:
if self.requires_accelerator():
from ray.experimental.channel.torch_tensor_accelerator_channel import (
TorchTensorAcceleratorChannel,
)
return TorchTensorAcceleratorChannel(
writer,
reader_and_node_list,
self,
driver_actor_id,
_tensor_metadata_channel,
_cpu_data_channel,
)
# Data does not require accelerator. Transfer via host memory using a
# shared-memory channel.
# TODO(swang): Allow the initial max buffer size to be overridden.
typ = SharedMemoryType()
return typ.create_channel(writer, reader_and_node_list, driver_actor_id)
def requires_accelerator(self) -> bool:
return self.transport == self.ACCELERATOR
def get_custom_communicator(self) -> Optional[Communicator]:
"""
Return the communicator group if one is specified.
"""
return self._communicator
def set_communicator_id(self, group_id: str) -> None:
self._communicator_id = group_id
@property
def communicator_id(self) -> Optional[str]:
return self._communicator_id
def __deepcopy__(self, memo):
"""
Deep copy all the fields except for the communicator group. The communicator
group should not be deep copied because it can be shared across `TorchTensorType`
instances.
"""
copy = TorchTensorType(
transport=self.transport,
_static_shape=self._static_shape,
_direct_return=self._direct_return,
)
copy._communicator = self._communicator
copy._communicator_id = self._communicator_id
return copy
| TorchTensorType |
python | astropy__astropy | astropy/coordinates/representation/spherical.py | {
"start": 51892,
"end": 54918
} | class ____(BaseSphericalCosLatDifferential):
"""Differential(s) of points in 3D spherical coordinates.
Parameters
----------
d_lon_coslat, d_lat : `~astropy.units.Quantity`
The differential longitude (with cos(lat) included) and latitude.
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
base_representation = SphericalRepresentation
_unit_differential = UnitSphericalCosLatDifferential
attr_classes = {
"d_lon_coslat": u.Quantity,
"d_lat": u.Quantity,
"d_distance": u.Quantity,
}
def __init__(self, d_lon_coslat, d_lat=None, d_distance=None, copy=True):
super().__init__(d_lon_coslat, d_lat, d_distance, copy=copy)
if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError("d_lon_coslat and d_lat should have equivalent units.")
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though some need base for the latitude to remove cos(lat).
if issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self.d_lon_coslat, self.d_lat)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_distance)
elif issubclass(other_class, SphericalDifferential):
return other_class(self._d_lon(base), self.d_lat, self.d_distance)
elif issubclass(other_class, UnitSphericalDifferential):
return other_class(self._d_lon(base), self.d_lat)
elif issubclass(other_class, PhysicsSphericalDifferential):
return other_class(self._d_lon(base), -self.d_lat, self.d_distance)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though we need base for the latitude to remove coslat.
if isinstance(representation, SphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base)
return cls(d_lon_coslat, representation.d_lat, representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base)
return cls(d_lon_coslat, -representation.d_theta, representation.d_r)
return super().from_representation(representation, base)
def _scale_operation(self, op, *args, scaled_base=False):
if scaled_base:
return self.__class__(
self.d_lon_coslat, self.d_lat, op(self.d_distance, *args)
)
else:
return super()._scale_operation(op, *args)
| SphericalCosLatDifferential |
python | google__python-fire | fire/console/platforms.py | {
"start": 4934,
"end": 8218
} | class ____(object):
"""An enum representing the system architecture you are running on."""
class _ARCH(object):
"""A single architecture."""
# pylint: disable=redefined-builtin
def __init__(self, id, name, file_name):
self.id = id
self.name = name
self.file_name = file_name
def __str__(self):
return self.id
def __eq__(self, other):
return (isinstance(other, type(self)) and
self.id == other.id and
self.name == other.name and
self.file_name == other.file_name)
def __hash__(self):
return hash(self.id) + hash(self.name) + hash(self.file_name)
def __ne__(self, other):
return not self == other
@classmethod
def _CmpHelper(cls, x, y):
"""Just a helper equivalent to the cmp() function in Python 2."""
return (x > y) - (x < y)
def __lt__(self, other):
return self._CmpHelper(
(self.id, self.name, self.file_name),
(other.id, other.name, other.file_name)) < 0
def __gt__(self, other):
return self._CmpHelper(
(self.id, self.name, self.file_name),
(other.id, other.name, other.file_name)) > 0
def __le__(self, other):
return not self.__gt__(other)
def __ge__(self, other):
return not self.__lt__(other)
x86 = _ARCH('x86', 'x86', 'x86')
x86_64 = _ARCH('x86_64', 'x86_64', 'x86_64')
ppc = _ARCH('PPC', 'PPC', 'ppc')
arm = _ARCH('arm', 'arm', 'arm')
_ALL = [x86, x86_64, ppc, arm]
# Possible values for `uname -m` and what arch they map to.
# Examples of possible values: https://en.wikipedia.org/wiki/Uname
_MACHINE_TO_ARCHITECTURE = {
'amd64': x86_64, 'x86_64': x86_64, 'i686-64': x86_64,
'i386': x86, 'i686': x86, 'x86': x86,
'ia64': x86, # Itanium is different x64 arch, treat it as the common x86.
'powerpc': ppc, 'power macintosh': ppc, 'ppc64': ppc,
'armv6': arm, 'armv6l': arm, 'arm64': arm, 'armv7': arm, 'armv7l': arm}
@staticmethod
def AllValues():
"""Gets all possible enum values.
Returns:
list, All the enum values.
"""
return list(Architecture._ALL)
@staticmethod
def FromId(architecture_id, error_on_unknown=True):
"""Gets the enum corresponding to the given architecture id.
Args:
architecture_id: str, The architecture id to parse
error_on_unknown: bool, True to raise an exception if the id is unknown,
False to just return None.
Raises:
InvalidEnumValue: If the given value cannot be parsed.
Returns:
ArchitectureTuple, One of the Architecture constants or None if the input
is None.
"""
if not architecture_id:
return None
for arch in Architecture._ALL:
if arch.id == architecture_id:
return arch
if error_on_unknown:
raise InvalidEnumValue(architecture_id, 'Architecture',
[value.id for value in Architecture._ALL])
return None
@staticmethod
def Current():
"""Determines the current system architecture.
Returns:
ArchitectureTuple, One of the Architecture constants or None if it cannot
be determined.
"""
return Architecture._MACHINE_TO_ARCHITECTURE.get(platform.machine().lower())
| Architecture |
python | huggingface__transformers | tests/models/blip/test_modeling_blip.py | {
"start": 7443,
"end": 10890
} | class ____:
def __init__(
self,
parent,
batch_size=12,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
projection_dim=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=512,
initializer_range=0.02,
bos_token_id=0,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
self.bos_token_id = bos_token_id
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
config = self.get_config()
return config, input_ids, input_mask
def get_config(self):
return BlipTextConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
bos_token_id=self.bos_token_id,
)
def create_and_check_model(self, config, input_ids, input_mask):
model = BlipTextModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
| BlipTextModelTester |
python | huggingface__transformers | tests/models/layoutlm/test_modeling_layoutlm.py | {
"start": 13837,
"end": 17617
} | class ____(unittest.TestCase):
@slow
def test_forward_pass_no_head(self):
model = LayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased").to(torch_device)
input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
# forward pass
outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
# test the sequence output on [0, :3, :3]
expected_slice = torch.tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
device=torch_device,
)
torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-3, atol=1e-3)
# test the pooled output on [1, :3]
expected_slice = torch.tensor([-0.6580, -0.0214, 0.8552], device=torch_device)
torch.testing.assert_close(outputs.pooler_output[1, :3], expected_slice, rtol=1e-3, atol=1e-3)
@slow
def test_forward_pass_sequence_classification(self):
# initialize model with randomly initialized sequence classification head
model = LayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2).to(
torch_device
)
input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()
# forward pass
outputs = model(
input_ids=input_ids,
bbox=bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
labels=torch.tensor([1, 1], device=torch_device),
)
# test whether we get a loss as a scalar
loss = outputs.loss
expected_shape = torch.Size([])
self.assertEqual(loss.shape, expected_shape)
# test the shape of the logits
logits = outputs.logits
expected_shape = torch.Size((2, 2))
self.assertEqual(logits.shape, expected_shape)
@slow
def test_forward_pass_token_classification(self):
# initialize model with randomly initialized token classification head
model = LayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13).to(
torch_device
)
input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
# forward pass
outputs = model(
input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
)
# test the loss calculation to be around 2.65
# expected_loss = torch.tensor(2.65, device=torch_device)
# The loss is currently somewhat random and can vary between 0.1-0.3 atol.
# self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=0.1))
# test the shape of the logits
logits = outputs.logits
expected_shape = torch.Size((2, 25, 13))
self.assertEqual(logits.shape, expected_shape)
@slow
def test_forward_pass_question_answering(self):
# initialize model with randomly initialized token classification head
model = LayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased").to(torch_device)
input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()
# forward pass
outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)
# test the shape of the logits
expected_shape = torch.Size((2, 25))
self.assertEqual(outputs.start_logits.shape, expected_shape)
self.assertEqual(outputs.end_logits.shape, expected_shape)
| LayoutLMModelIntegrationTest |
python | apache__airflow | providers/apache/hive/tests/unit/apache/hive/operators/test_hive_stats.py | {
"start": 1837,
"end": 14910
} | class ____(TestHiveEnvironment):
def setup_method(self, method):
self.kwargs = dict(
table="table",
partition=dict(col="col", value="value"),
metastore_conn_id="metastore_conn_id",
presto_conn_id="presto_conn_id",
mysql_conn_id="mysql_conn_id",
task_id="test_hive_stats_collection_operator",
)
super().setup_method(method)
def test_get_default_exprs(self):
col = "col"
default_exprs = HiveStatsCollectionOperator(**self.kwargs).get_default_exprs(col, None)
assert default_exprs == {(col, "non_null"): f"COUNT({col})"}
def test_get_default_exprs_excluded_cols(self):
col = "excluded_col"
self.kwargs.update(dict(excluded_columns=[col]))
default_exprs = HiveStatsCollectionOperator(**self.kwargs).get_default_exprs(col, None)
assert default_exprs == {}
def test_get_default_exprs_number(self):
col = "col"
for col_type in ["double", "int", "bigint", "float"]:
default_exprs = HiveStatsCollectionOperator(**self.kwargs).get_default_exprs(col, col_type)
assert default_exprs == {
(col, "avg"): f"AVG({col})",
(col, "max"): f"MAX({col})",
(col, "min"): f"MIN({col})",
(col, "non_null"): f"COUNT({col})",
(col, "sum"): f"SUM({col})",
}
def test_get_default_exprs_boolean(self):
col = "col"
col_type = "boolean"
default_exprs = HiveStatsCollectionOperator(**self.kwargs).get_default_exprs(col, col_type)
assert default_exprs == {
(col, "false"): f"SUM(CASE WHEN NOT {col} THEN 1 ELSE 0 END)",
(col, "non_null"): f"COUNT({col})",
(col, "true"): f"SUM(CASE WHEN {col} THEN 1 ELSE 0 END)",
}
def test_get_default_exprs_string(self):
col = "col"
col_type = "string"
default_exprs = HiveStatsCollectionOperator(**self.kwargs).get_default_exprs(col, col_type)
assert default_exprs == {
(col, "approx_distinct"): f"APPROX_DISTINCT({col})",
(col, "len"): f"SUM(CAST(LENGTH({col}) AS BIGINT))",
(col, "non_null"): f"COUNT({col})",
}
@patch("airflow.providers.apache.hive.operators.hive_stats.json.dumps")
@patch("airflow.providers.apache.hive.operators.hive_stats.MySqlHook")
@patch("airflow.providers.apache.hive.operators.hive_stats.PrestoHook")
@patch("airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook")
def test_execute(self, mock_hive_metastore_hook, mock_presto_hook, mock_mysql_hook, mock_json_dumps):
mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols = [fake_col]
mock_mysql_hook.return_value.get_records.return_value = False
hive_stats_collection_operator = HiveStatsCollectionOperator(**self.kwargs)
hive_stats_collection_operator.execute(context={})
mock_hive_metastore_hook.assert_called_once_with(
metastore_conn_id=hive_stats_collection_operator.metastore_conn_id
)
mock_hive_metastore_hook.return_value.get_table.assert_called_once_with(
table_name=hive_stats_collection_operator.table
)
mock_presto_hook.assert_called_once_with(presto_conn_id=hive_stats_collection_operator.presto_conn_id)
mock_mysql_hook.assert_called_once_with(hive_stats_collection_operator.mysql_conn_id)
mock_json_dumps.assert_called_once_with(hive_stats_collection_operator.partition, sort_keys=True)
field_types = {
col.name: col.type for col in mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols
}
exprs = {("", "count"): "COUNT(*)"}
for col, col_type in list(field_types.items()):
exprs.update(hive_stats_collection_operator.get_default_exprs(col, col_type))
rows = [
(
hive_stats_collection_operator.ds,
hive_stats_collection_operator.dttm,
hive_stats_collection_operator.table,
mock_json_dumps.return_value,
)
+ (r[0][0], r[0][1], r[1])
for r in zip(exprs, mock_presto_hook.return_value.get_first.return_value)
]
mock_mysql_hook.return_value.insert_rows.assert_called_once_with(
table="hive_stats",
rows=rows,
target_fields=[
"ds",
"dttm",
"table_name",
"partition_repr",
"col",
"metric",
"value",
],
)
@patch("airflow.providers.apache.hive.operators.hive_stats.json.dumps")
@patch("airflow.providers.apache.hive.operators.hive_stats.MySqlHook")
@patch("airflow.providers.apache.hive.operators.hive_stats.PrestoHook")
@patch("airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook")
def test_execute_with_assignment_func(
self, mock_hive_metastore_hook, mock_presto_hook, mock_mysql_hook, mock_json_dumps
):
def assignment_func(col, _):
return {(col, "test"): f"TEST({col})"}
self.kwargs.update(dict(assignment_func=assignment_func))
mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols = [fake_col]
mock_mysql_hook.return_value.get_records.return_value = False
hive_stats_collection_operator = HiveStatsCollectionOperator(**self.kwargs)
hive_stats_collection_operator.execute(context={})
field_types = {
col.name: col.type for col in mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols
}
exprs = {("", "count"): "COUNT(*)"}
for col, col_type in list(field_types.items()):
exprs.update(hive_stats_collection_operator.assignment_func(col, col_type))
rows = [
(
hive_stats_collection_operator.ds,
hive_stats_collection_operator.dttm,
hive_stats_collection_operator.table,
mock_json_dumps.return_value,
)
+ (r[0][0], r[0][1], r[1])
for r in zip(exprs, mock_presto_hook.return_value.get_first.return_value)
]
mock_mysql_hook.return_value.insert_rows.assert_called_once_with(
table="hive_stats",
rows=rows,
target_fields=[
"ds",
"dttm",
"table_name",
"partition_repr",
"col",
"metric",
"value",
],
)
@patch("airflow.providers.apache.hive.operators.hive_stats.json.dumps")
@patch("airflow.providers.apache.hive.operators.hive_stats.MySqlHook")
@patch("airflow.providers.apache.hive.operators.hive_stats.PrestoHook")
@patch("airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook")
def test_execute_with_assignment_func_no_return_value(
self, mock_hive_metastore_hook, mock_presto_hook, mock_mysql_hook, mock_json_dumps
):
def assignment_func(_, __):
pass
self.kwargs.update(dict(assignment_func=assignment_func))
mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols = [fake_col]
mock_mysql_hook.return_value.get_records.return_value = False
hive_stats_collection_operator = HiveStatsCollectionOperator(**self.kwargs)
hive_stats_collection_operator.execute(context={})
field_types = {
col.name: col.type for col in mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols
}
exprs = {("", "count"): "COUNT(*)"}
for col, col_type in list(field_types.items()):
exprs.update(hive_stats_collection_operator.get_default_exprs(col, col_type))
rows = [
(
hive_stats_collection_operator.ds,
hive_stats_collection_operator.dttm,
hive_stats_collection_operator.table,
mock_json_dumps.return_value,
)
+ (r[0][0], r[0][1], r[1])
for r in zip(exprs, mock_presto_hook.return_value.get_first.return_value)
]
mock_mysql_hook.return_value.insert_rows.assert_called_once_with(
table="hive_stats",
rows=rows,
target_fields=[
"ds",
"dttm",
"table_name",
"partition_repr",
"col",
"metric",
"value",
],
)
@patch("airflow.providers.apache.hive.operators.hive_stats.MySqlHook")
@patch("airflow.providers.apache.hive.operators.hive_stats.PrestoHook")
@patch("airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook")
def test_execute_no_query_results(self, mock_hive_metastore_hook, mock_presto_hook, mock_mysql_hook):
mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols = [fake_col]
mock_mysql_hook.return_value.get_records.return_value = False
mock_presto_hook.return_value.get_first.return_value = None
with pytest.raises(AirflowException):
HiveStatsCollectionOperator(**self.kwargs).execute(context={})
@patch("airflow.providers.apache.hive.operators.hive_stats.json.dumps")
@patch("airflow.providers.apache.hive.operators.hive_stats.MySqlHook")
@patch("airflow.providers.apache.hive.operators.hive_stats.PrestoHook")
@patch("airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook")
def test_execute_delete_previous_runs_rows(
self, mock_hive_metastore_hook, mock_presto_hook, mock_mysql_hook, mock_json_dumps
):
mock_hive_metastore_hook.return_value.get_table.return_value.sd.cols = [fake_col]
mock_mysql_hook.return_value.get_records.return_value = True
hive_stats_collection_operator = HiveStatsCollectionOperator(**self.kwargs)
hive_stats_collection_operator.execute(context={})
sql = f"""
DELETE FROM hive_stats
WHERE
table_name='{hive_stats_collection_operator.table}' AND
partition_repr='{mock_json_dumps.return_value}' AND
dttm='{hive_stats_collection_operator.dttm}';
"""
mock_mysql_hook.return_value.run.assert_called_once_with(sql)
@pytest.mark.skipif(
"AIRFLOW_RUNALL_TESTS" not in os.environ, reason="Skipped because AIRFLOW_RUNALL_TESTS is not set"
)
@patch(
"airflow.providers.apache.hive.operators.hive_stats.HiveMetastoreHook",
side_effect=MockHiveMetastoreHook,
)
def test_runs_for_hive_stats(self, mock_hive_metastore_hook):
mock_mysql_hook = MockMySqlHook()
mock_presto_hook = MockPrestoHook()
with patch(
"airflow.providers.apache.hive.operators.hive_stats.PrestoHook", return_value=mock_presto_hook
):
with patch(
"airflow.providers.apache.hive.operators.hive_stats.MySqlHook", return_value=mock_mysql_hook
):
op = HiveStatsCollectionOperator(
task_id="hive_stats_check",
table="airflow.static_babynames_partitioned",
partition={"ds": DEFAULT_DATE_DS},
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
select_count_query = (
"SELECT COUNT(*) AS __count FROM airflow.static_babynames_partitioned WHERE ds = '2015-01-01';"
)
mock_presto_hook.get_first.assert_called_with(hql=select_count_query)
expected_stats_select_query = (
"SELECT 1 "
"FROM hive_stats "
"WHERE table_name='airflow.static_babynames_partitioned' "
' AND partition_repr=\'{"ds": "2015-01-01"}\' '
" AND dttm='2015-01-01T00:00:00+00:00' "
"LIMIT 1;"
)
raw_stats_select_query = mock_mysql_hook.get_records.call_args_list[0][0][0]
actual_stats_select_query = re.sub(r"\s{2,}", " ", raw_stats_select_query).strip()
assert expected_stats_select_query == actual_stats_select_query
insert_rows_val = [
(
"2015-01-01",
"2015-01-01T00:00:00+00:00",
"airflow.static_babynames_partitioned",
'{"ds": "2015-01-01"}',
"",
"count",
["val_0", "val_1"],
)
]
mock_mysql_hook.insert_rows.assert_called_with(
table="hive_stats",
rows=insert_rows_val,
target_fields=[
"ds",
"dttm",
"table_name",
"partition_repr",
"col",
"metric",
"value",
],
)
| TestHiveStatsCollectionOperator |
python | doocs__leetcode | solution/1500-1599/1592.Rearrange Spaces Between Words/Solution.py | {
"start": 0,
"end": 297
} | class ____:
def reorderSpaces(self, text: str) -> str:
spaces = text.count(" ")
words = text.split()
if len(words) == 1:
return words[0] + " " * spaces
cnt, mod = divmod(spaces, len(words) - 1)
return (" " * cnt).join(words) + " " * mod
| Solution |
python | scipy__scipy | scipy/stats/_multivariate.py | {
"start": 53946,
"end": 69787
} | class ____(multi_rv_generic):
r"""A matrix t-random variable.
The `mean` keyword specifies the mean.
The `row_spread` keyword specifies the row-wise spread matrix.
The `col_spread` keyword specifies the column-wise spread matrix.
Methods
-------
pdf(x, mean=None, row_spread=None, col_spread=None)
Probability density function.
logpdf(x, mean=None, row_spread=None, col_spread=None)
Log of the probability density function.
rvs(mean=None, row_spread=1, col_spread=1, df=1, size=1, random_state=None)
Draw random samples.
Parameters
----------
%(_matt_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_matt_doc_callparams_note)s
The spread matrices specified by `row_spread` and `col_spread` must be
(symmetric) positive definite. If the samples in `X` have shape `(m,n)`
then `row_spread` must have shape `(m,m)` and `col_spread` must have shape `(n,n)`.
Spread matrices must be full rank.
The probability density function for `matrix_t` is
.. math::
f(X \vert \mathrm{M}, \Sigma, \Omega, \mathrm{df}) =
\frac{
\Gamma_n \left(
\frac{\mathrm{df} + m + n - 1}{2}
\right)
\left(
\det \left(
I_n + (X - \mathrm{M})^T \Sigma^{-1} (X - \mathrm{M}) \Omega^{-1}
\right)
\right)^{ -\frac{\mathrm{df} + m + n - 1}{2} }
}{
\Gamma_n \left(
\frac{\mathrm{df} + n - 1}{2}
\right)
\pi^{mn / 2}
\left( \det \Sigma \right)^{n/2}
\left( \det \Omega \right)^{m/2}
}
or, alternatively,
.. math::
f(X \vert \mathrm{M}, \Sigma, \Omega, \mathrm{df}) =
\frac{
\Gamma_m \left(
\frac{\mathrm{df} + m + n - 1}{2}
\right)
\left(
\det \left(
I_m + \Sigma^{-1} (X - \mathrm{M}) \Omega^{-1} (X - \mathrm{M})^T
\right)
\right)^{ -\frac{\mathrm{df} + m + n - 1}{2} }
}{
\Gamma_m \left(
\frac{\mathrm{df} + n - 1}{2}
\right)
\pi^{mn / 2}
\left( \det \Sigma \right)^{n/2}
\left( \det \Omega \right)^{m/2}
}
where :math:`\mathrm{M}` is the mean,
:math:`\Sigma` is the row-wise spread matrix,
:math:`\Omega` is the column-wise matrix,
:math:`\mathrm{df}` is the degrees of freedom,
and :math:`\Gamma_n` is the multivariate gamma function.
These equivalent formulations come from the identity
.. math::
\det\left( I_m + A B \right) = \det\left( I_n + B A \right)
for :math:`m \times n` arrays :math:`A` and :math:`B^T`
and the fact that
:math:`\gamma_n(\mathrm{df} + m) / \gamma_n(\mathrm{df})`
is equal to
:math:`\gamma_m(\mathrm{df} + n) / \gamma_m(\mathrm{df})`,
where
.. math::
\gamma_m(\mathrm{df}) = 2^{m(m-1)/2}
\Gamma_m\left( (\mathrm{df} + m - 1) / 2 \right)
denotes a normalized multivariate gamma function.
When :math:`\mathrm{df} = 1` this distribution is known as the matrix
variate Cauchy.
.. versionadded:: 1.17.0
References
----------
.. [1] Gupta, A.K., & Nagar, D.K. (2000). Matrix Variate Distributions (1st ed.).
Chapman and Hall/CRC.
Examples
--------
>>> import numpy as np
>>> from scipy.stats import matrix_t
>>> M = np.arange(6).reshape(3,2)
>>> M
array([[0, 1],
[2, 3],
[4, 5]])
>>> Sigma = np.diag([1,2,3])
>>> Sigma
array([[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> Omega = 0.3*np.identity(2)
>>> Omega
array([[ 0.3, 0. ],
[ 0. , 0.3]])
>>> X = M + 0.1
>>> X
array([[ 0.1, 1.1],
[ 2.1, 3.1],
[ 4.1, 5.1]])
>>> df = 3
>>> matrix_t.pdf(X, mean=M, row_spread=Sigma, col_spread=Omega, df=df)
0.9972880280135796
Alternatively, the object may be called (as a function) to fix the mean
and spread parameters, returning a "frozen" matrix t
random variable:
>>> rv = matrix_t(mean=None, row_spread=1, col_spread=1, df=1)
>>> # Frozen object with the same methods but holding the given
>>> # mean and spreads and degrees of freedom fixed.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = scipy._lib.doccer.docformat(
self.__doc__, matrix_t_docdict_params
)
def __call__(self, mean=None, row_spread=1, col_spread=1, df=None, seed=None):
"""Create a frozen matrix t distribution.
See `matrix_t_frozen` for more information.
"""
return matrix_t_frozen(mean, row_spread, col_spread, df, seed)
def _process_parameters(self, mean, row_spread, col_spread, df):
"""
Infer dimensionality from mean or covariance matrices.
Handle defaults. Ensure conformality.
Parameters
----------
mean : ndarray, shape (m,n)
Mean of the distribution
row_spread : ndarray, shape (m,m)
Row-wise spread matrix
col_spread : ndarray, shape (n,n)
Column-wise spread matrix
df : float
Degrees of freedom
"""
# Process mean
if mean is not None:
mean = np.asarray(mean, dtype=float)
meanshape = mean.shape
if 0 in meanshape:
raise ValueError("Array `mean` has invalid shape.")
if len(meanshape) != 2:
raise ValueError("Array `mean` must be 2D.")
# Process row-wise spread
row_spread = np.asarray(row_spread, dtype=float)
if row_spread.ndim == 0:
if mean is not None:
row_spread = row_spread * np.identity(meanshape[0])
else:
row_spread = row_spread * np.identity(1)
elif row_spread.ndim == 1:
row_spread = np.diag(row_spread)
rowshape = row_spread.shape
if 0 in rowshape:
raise ValueError("Array `row_spread` has invalid shape.")
if len(rowshape) != 2:
raise ValueError("Array `row_spread` must be a scalar or a 2D array.")
if rowshape[0] != rowshape[1]:
raise ValueError("Array `row_spread` must be square.")
numrows = rowshape[0]
# Process column-wise spread
col_spread = np.asarray(col_spread, dtype=float)
if col_spread.ndim == 0:
if mean is not None:
col_spread = col_spread * np.identity(meanshape[1])
else:
col_spread = col_spread * np.identity(1)
elif col_spread.ndim == 1:
col_spread = np.diag(col_spread)
colshape = col_spread.shape
if 0 in colshape:
raise ValueError("Array `col_spread` has invalid shape.")
if len(colshape) != 2:
raise ValueError("Array `col_spread` must be a scalar or a 2D array.")
if colshape[0] != colshape[1]:
raise ValueError("Array `col_spread` must be square.")
numcols = colshape[0]
# Ensure mean and spreads are conformal
if mean is not None:
if meanshape[0] != numrows:
raise ValueError(
"Arrays `mean` and `row_spread` must have the same number of rows."
)
if meanshape[1] != numcols:
raise ValueError(
"Arrays `mean` and `col_spread` must have the same number "
"of columns."
)
else:
mean = np.zeros((numrows, numcols))
dims = (numrows, numcols)
if df is None:
df = 1 # default to matrix variate Cauchy
elif not np.isscalar(df):
raise ValueError("Degrees of freedom must be a scalar.")
elif df <= 0:
raise ValueError("Degrees of freedom must be positive.")
return dims, mean, row_spread, col_spread, df
def _process_quantiles(self, X, dims):
"""
Adjust quantiles array so that last two axes labels the component of
each data point.
"""
X = np.asarray(X, dtype=float)
if X.ndim == 2:
X = X[np.newaxis, :]
if X.shape[-2:] != dims:
raise ValueError(
"The shape of array `X` is not conformal with "
"the distribution parameters."
)
return X
def _logpdf(
self,
dims,
X,
mean,
df,
invrow_spread,
invcol_spread,
logdetrow_spread,
logdetcol_spread,
):
"""
Log of the matrix t probability density function.
Parameters
----------
dims : tuple
Dimensions of the matrix variates
X : ndarray, shape (m,n) (equal to `dims`)
Points at which to evaluate the log of the probability density function
mean : ndarray, shape (m,n)
Mean of the distribution
df : float
Degrees-of-freedom parameter
invrow_spread : ndarray, shape (m,m)
Inverse of the row-wise spread matrix
invcol_spread : ndarray, shape (n,n)
Inverse of the column-wise spread matrix
logdetrow_spread : float
Log-determinant of the row-wise spread matrix
detcol_spread : float
Log-determinant of the column-wise spread matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use `logpdf` instead.
"""
m, n = dims
X_shape = X.shape
if X.ndim > 3:
X = X.reshape(-1, m, n)
X_centered = X - mean[np.newaxis, ...]
det_arg = np.identity(n) + np.einsum(
"nij,njk,nkl,nlp->nip",
X_centered.transpose(0, 2, 1),
invrow_spread[np.newaxis, ...],
X_centered,
invcol_spread[np.newaxis, ...],
optimize=True,
)
_, logdet = np.linalg.slogdet(det_arg)
log_d_mn = -((df + m + n - 1) / 2) * logdet
log_f_mn = (
scipy.special.multigammaln((df + m + n - 1) / 2, n)
- scipy.special.multigammaln((df + n - 1) / 2, n)
- (m * n / 2) * _LOG_PI
- (n / 2) * logdetrow_spread
- (m / 2) * logdetcol_spread
)
retval = log_d_mn + log_f_mn
if len(X_shape) > 3:
retval = retval.reshape(X_shape[:-2])
return retval
def logpdf(self, X, mean=None, row_spread=1, col_spread=1, df=1):
"""Log of the matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matt_doc_default_callparams)s
Returns
-------
logpdf : ndarray
Log of the probability density function evaluated at `X`
Notes
-----
%(_matt_doc_callparams_note)s
Examples
-------
>>> import numpy as np
>>> from scipy.stats import matrix_t
>>> M = np.arange(6).reshape(3,2); M
array([[0, 1],
[2, 3],
[4, 5]])
>>> Sigma = np.diag([1,2,3]); Sigma
array([[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> Omega = 0.3*np.identity(2); Omega
array([[ 0.3, 0. ],
[ 0. , 0.3]])
>>> X = M + 0.1; X
array([[ 0.1, 1.1],
[ 2.1, 3.1],
[ 4.1, 5.1]])
>>> df = 3; df
3
>>> matrix_t.logpdf(X, mean=M, row_spread=Sigma, col_spread=Omega, df=df)
-0.002715656044664061
"""
dims, mean, row_spread, col_spread, df = self._process_parameters(
mean, row_spread, col_spread, df
)
X = self._process_quantiles(X, dims)
rowpsd = _PSD(row_spread, allow_singular=False)
colpsd = _PSD(col_spread, allow_singular=False)
invrow_spread = rowpsd.pinv
invcol_spread = colpsd.pinv
logdetrow_spread = rowpsd.log_pdet
logdetcol_spread = colpsd.log_pdet
out = self._logpdf(
dims,
X,
mean,
df,
invrow_spread,
invcol_spread,
logdetrow_spread,
logdetcol_spread,
)
return _squeeze_output(out)
def pdf(self, X, mean=None, row_spread=1, col_spread=1, df=1):
"""Matrix t probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matt_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `X`
Notes
-----
%(_matt_doc_callparams_note)s
Examples
--------
>>> import numpy as np
>>> from scipy.stats import matrix_t
>>> M = np.arange(6).reshape(3,2); M
array([[0, 1],
[2, 3],
[4, 5]])
>>> Sigma = np.diag([1,2,3]); Sigma
array([[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> Omega = 0.3*np.identity(2); Omega
array([[ 0.3, 0. ],
[ 0. , 0.3]])
>>> X = M + 0.1; X
array([[ 0.1, 1.1],
[ 2.1, 3.1],
[ 4.1, 5.1]])
>>> df = 3; df
3
>>> matrix_t.logpdf(X, mean=M, row_spread=Sigma, col_spread=Omega, df=df)
0.9972880280135796
"""
return np.exp(self.logpdf(X, mean, row_spread, col_spread, df))
def rvs(
self, mean=None, row_spread=1, col_spread=1, df=1, size=1, random_state=None
) -> np.ndarray:
"""Draw random samples from a matrix t distribution.
Parameters
----------
%(_matt_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `dims`), where `dims` is the
dimension of the random matrices.
Notes
-----
%(_matt_doc_callparams_note)s
This method takes advantage of the two equivalent expressions of the
probability density function. It samples a Cholesky factor of a
random variate of the appropriate inverse Wishart distribution using
the smaller of the row/column dimensions.
"""
size = int(size)
dims, mean, row_spread, col_spread, df = self._process_parameters(
mean, row_spread, col_spread, df
)
random_state = self._get_random_state(random_state)
# see scipy.stats.matrix_normal.rvs
std_norm = random_state.standard_normal(
size=(dims[1], size, dims[0])
).transpose(1, 2, 0)
if dims[0] <= dims[1]:
rowchol = _cholesky_invwishart_rvs(df, row_spread, size, random_state)
colchol = scipy.linalg.cholesky(col_spread, lower=True)[np.newaxis, ...]
else:
rowchol = scipy.linalg.cholesky(row_spread, lower=True)[np.newaxis, ...]
colchol = _cholesky_invwishart_rvs(df, col_spread, size, random_state)
t_raw = np.einsum("ijp,ipq,ikq->ijk", rowchol, std_norm, colchol, optimize=True)
t_centered = mean[np.newaxis, ...] + t_raw
if size == 1:
t_centered = t_centered.reshape(mean.shape)
return t_centered
matrix_t = matrix_t_gen()
| matrix_t_gen |
python | google__jax | jax/_src/debugger/colab_debugger.py | {
"start": 6866,
"end": 7854
} | class ____(cli_debugger.CliDebugger):
"""A JAX debugger for a Colab environment."""
def __init__(self,
frames: list[debugger_core.DebuggerFrame],
thread_id: int):
super().__init__(frames, thread_id)
self._debugger_view = DebuggerView(self.current_frame())
self.stdout = self.stdin = self._debugger_view # type: ignore
def do_up(self, arg):
super().do_up(arg)
self._debugger_view.update_frame(self.current_frame())
return False
def do_down(self, arg):
super().do_down(arg)
self._debugger_view.update_frame(self.current_frame())
return False
def run(self):
self._debugger_view.render()
while True:
if not self.cmdloop():
return
def _run_debugger(frames, thread_id, **kwargs):
try:
ColabDebugger(frames, thread_id, **kwargs).run()
except Exception:
traceback.print_exc()
if colab_lib.IS_COLAB_ENABLED:
debugger_core.register_debugger("colab", _run_debugger, 1)
| ColabDebugger |
python | allegroai__clearml | clearml/backend_api/services/v2_13/workers.py | {
"start": 66588,
"end": 70612
} | class ____(Response):
"""
Response of workers.get_stats endpoint.
:param workers: List of the requested workers with their statistics
:type workers: Sequence[WorkerStats]
"""
_service = "workers"
_action = "get_stats"
_version = "2.13"
_schema = {
"definitions": {
"aggregation_stats": {
"properties": {
"aggregation": {
"oneOf": [
{"$ref": "#/definitions/aggregation_type"},
{"type": "null"},
]
},
"values": {
"description": "List of values corresponding to the dates in metric statistics",
"items": {"type": "number"},
"type": ["array", "null"],
},
},
"type": "object",
},
"aggregation_type": {
"description": "Metric aggregation type",
"enum": ["avg", "min", "max"],
"type": "string",
},
"metric_stats": {
"properties": {
"dates": {
"description": "List of timestamps (in seconds from epoch) in the acceding order. The timestamps are separated by the requested interval. Timestamps where no workers activity was recorded are omitted.",
"items": {"type": "integer"},
"type": ["array", "null"],
},
"metric": {
"description": "Name of the metric (cpu_usage, memory_used etc.)",
"type": ["string", "null"],
},
"stats": {
"description": "Statistics data by type",
"items": {"$ref": "#/definitions/aggregation_stats"},
"type": ["array", "null"],
},
"variant": {
"description": "Name of the metric component. Set only if 'split_by_variant' was set in the request",
"type": ["string", "null"],
},
},
"type": "object",
},
"worker_stats": {
"properties": {
"metrics": {
"description": "List of the metrics statistics for the worker",
"items": {"$ref": "#/definitions/metric_stats"},
"type": ["array", "null"],
},
"worker": {
"description": "ID of the worker",
"type": ["string", "null"],
},
},
"type": "object",
},
},
"properties": {
"workers": {
"description": "List of the requested workers with their statistics",
"items": {"$ref": "#/definitions/worker_stats"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, workers: Optional[List[Any]] = None, **kwargs: Any) -> None:
super(GetStatsResponse, self).__init__(**kwargs)
self.workers = workers
@schema_property("workers")
def workers(self) -> Optional[List[Any]]:
return self._property_workers
@workers.setter
def workers(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_workers = None
return
self.assert_isinstance(value, "workers", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [WorkerStats.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "workers", WorkerStats, is_array=True)
self._property_workers = value
| GetStatsResponse |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/CustomGraphItem.py | {
"start": 338,
"end": 3408
} | class ____(pg.GraphItem):
def __init__(self):
self.dragPoint = None
self.dragOffset = None
self.textItems = []
pg.GraphItem.__init__(self)
self.scatter.sigClicked.connect(self.clicked)
def setData(self, **kwds):
self.text = kwds.pop('text', [])
self.data = kwds
if 'pos' in self.data:
npts = self.data['pos'].shape[0]
self.data['data'] = np.empty(npts, dtype=[('index', int)])
self.data['data']['index'] = np.arange(npts)
self.setTexts(self.text)
self.updateGraph()
def setTexts(self, text):
for i in self.textItems:
i.scene().removeItem(i)
self.textItems = []
for t in text:
item = pg.TextItem(t)
self.textItems.append(item)
item.setParentItem(self)
def updateGraph(self):
pg.GraphItem.setData(self, **self.data)
for i,item in enumerate(self.textItems):
item.setPos(*self.data['pos'][i])
def mouseDragEvent(self, ev):
if ev.button() != QtCore.Qt.MouseButton.LeftButton:
ev.ignore()
return
if ev.isStart():
# We are already one step into the drag.
# Find the point(s) at the mouse cursor when the button was first
# pressed:
pos = ev.buttonDownPos()
pts = self.scatter.pointsAt(pos)
if len(pts) == 0:
ev.ignore()
return
self.dragPoint = pts[0]
ind = pts[0].data()[0]
self.dragOffset = self.data['pos'][ind] - pos
elif ev.isFinish():
self.dragPoint = None
return
else:
if self.dragPoint is None:
ev.ignore()
return
ind = self.dragPoint.data()[0]
self.data['pos'][ind] = ev.pos() + self.dragOffset
self.updateGraph()
ev.accept()
def clicked(self, pts):
print("clicked: %s" % pts)
g = Graph()
v.addItem(g)
## Define positions of nodes
pos = np.array([
[0,0],
[10,0],
[0,10],
[10,10],
[5,5],
[15,5]
], dtype=float)
## Define the set of connections in the graph
adj = np.array([
[0,1],
[1,3],
[3,2],
[2,0],
[1,5],
[3,5],
])
## Define the symbol to use for each node (this is optional)
symbols = ['o','o','o','o','t','+']
## Define the line style for each connection (this is optional)
lines = np.array([
(255,0,0,255,1),
(255,0,255,255,2),
(255,0,255,255,3),
(255,255,0,255,2),
(255,0,0,255,1),
(255,255,255,255,4),
], dtype=[('red',np.ubyte),('green',np.ubyte),('blue',np.ubyte),('alpha',np.ubyte),('width',float)])
## Define text to show next to each symbol
texts = ["Point %d" % i for i in range(6)]
## Update the graph
g.setData(pos=pos, adj=adj, pen=lines, size=1, symbol=symbols, pxMode=False, text=texts)
if __name__ == '__main__':
pg.exec()
| Graph |
python | automl__auto-sklearn | autosklearn/metalearning/metalearning/meta_base.py | {
"start": 658,
"end": 782
} | class ____(object):
def __init__(self, name, features):
self.name = name
self.features = features
| Instance |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/log/stackdriver_task_handler.py | {
"start": 2332,
"end": 15940
} | class ____(logging.Handler):
"""
Handler that directly makes Stackdriver logging API calls.
This is a Python standard ``logging`` handler using that can be used to
route Python standard logging messages directly to the Stackdriver
Logging API.
It can also be used to save logs for executing tasks. To do this, you should set as a handler with
the name "tasks". In this case, it will also be used to read the log for display in Web UI.
This handler supports both an asynchronous and synchronous transport.
:param gcp_key_path: Path to Google Cloud Credential JSON file.
If omitted, authorization based on `the Application Default Credentials
<https://cloud.google.com/docs/authentication/production#finding_credentials_automatically>`__ will
be used.
:param scopes: OAuth scopes for the credentials,
:param name: the name of the custom log in Stackdriver Logging. Defaults
to 'airflow'. The name of the Python logger will be represented
in the ``python_logger`` field.
:param transport: Class for creating new transport objects. It should
extend from the base :class:`google.cloud.logging.handlers.Transport` type and
implement :meth`google.cloud.logging.handlers.Transport.send`. Defaults to
:class:`google.cloud.logging.handlers.BackgroundThreadTransport`. The other
option is :class:`google.cloud.logging.handlers.SyncTransport`.
:param resource: (Optional) Monitored resource of the entry, defaults
to the global resource type.
:param labels: (Optional) Mapping of labels for the entry.
"""
LABEL_TASK_ID = "task_id"
LABEL_DAG_ID = "dag_id"
LABEL_LOGICAL_DATE = "logical_date" if AIRFLOW_V_3_0_PLUS else "execution_date"
LABEL_TRY_NUMBER = "try_number"
LOG_VIEWER_BASE_URL = "https://console.cloud.google.com/logs/viewer"
LOG_NAME = "Google Stackdriver"
trigger_supported = True
trigger_should_queue = False
trigger_should_wrap = False
trigger_send_end_marker = False
def __init__(
self,
gcp_key_path: str | None = None,
scopes: Collection[str] | None = _DEFAULT_SCOPESS,
name: str | ArgNotSet = NOTSET,
transport: type[Transport] = BackgroundThreadTransport,
resource: Resource = _GLOBAL_RESOURCE,
labels: dict[str, str] | None = None,
gcp_log_name: str = DEFAULT_LOGGER_NAME,
):
if name is not NOTSET:
warnings.warn(
"Param `name` is deprecated and will be removed in a future release. "
"Please use `gcp_log_name` instead. ",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
gcp_log_name = str(name)
super().__init__()
self.gcp_key_path: str | None = gcp_key_path
self.scopes: Collection[str] | None = scopes
self.gcp_log_name: str = gcp_log_name
self.transport_type: type[Transport] = transport
self.resource: Resource = resource
self.labels: dict[str, str] | None = labels
self.task_instance_labels: dict[str, str] | None = {}
self.task_instance_hostname = "default-hostname"
@cached_property
def _credentials_and_project(self) -> tuple[Credentials, str]:
credentials, project = get_credentials_and_project_id(
key_path=self.gcp_key_path, scopes=self.scopes, disable_logging=True
)
return credentials, project
@property
def _client(self) -> gcp_logging.Client:
"""The Cloud Library API client."""
credentials, project = self._credentials_and_project
client = gcp_logging.Client(
credentials=credentials,
project=project,
client_info=CLIENT_INFO,
)
return client
@property
def _logging_service_client(self) -> LoggingServiceV2Client:
"""The Cloud logging service v2 client."""
credentials, _ = self._credentials_and_project
client = LoggingServiceV2Client(
credentials=credentials,
client_info=CLIENT_INFO,
)
return client
@cached_property
def _transport(self) -> Transport:
"""Object responsible for sending data to Stackdriver."""
# The Transport object is badly defined (no init) but in the docs client/name as constructor
# arguments are a requirement for any class that derives from Transport class, hence ignore:
return self.transport_type(self._client, self.gcp_log_name)
def _get_labels(self, task_instance=None):
if task_instance:
ti_labels = self._task_instance_to_labels(task_instance)
else:
ti_labels = self.task_instance_labels
labels: dict[str, str] | None
if self.labels and ti_labels:
labels = {}
labels.update(self.labels)
labels.update(ti_labels)
elif self.labels:
labels = self.labels
elif ti_labels:
labels = ti_labels
else:
labels = None
return labels or {}
def emit(self, record: logging.LogRecord) -> None:
"""
Actually log the specified logging record.
:param record: The record to be logged.
"""
message = self.format(record)
ti = None
if not AIRFLOW_V_3_0_PLUS and getattr(record, ctx_indiv_trigger.name, None):
ti = getattr(record, "task_instance", None) # trigger context
labels = self._get_labels(ti)
self._transport.send(record, message, resource=self.resource, labels=labels)
def set_context(self, task_instance: TaskInstance) -> None:
"""
Configure the logger to add information with information about the current task.
:param task_instance: Currently executed task
"""
self.task_instance_labels = self._task_instance_to_labels(task_instance)
self.task_instance_hostname = task_instance.hostname
def read(
self, task_instance: TaskInstance, try_number: int | None = None, metadata: dict | None = None
) -> tuple[list[tuple[tuple[str, str]]], list[dict[str, str | bool]]]:
"""
Read logs of given task instance from Stackdriver logging.
:param task_instance: task instance object
:param try_number: task instance try_number to read logs from. If None
it returns all logs
:param metadata: log metadata. It is used for steaming log reading and auto-tailing.
:return: a tuple of (
list of (one element tuple with two element tuple - hostname and logs)
and list of metadata)
"""
if try_number is not None and try_number < 1:
logs = f"Error fetching the logs. Try number {try_number} is invalid."
return [((self.task_instance_hostname, logs),)], [{"end_of_log": "true"}]
if not metadata:
metadata = {}
ti_labels = self._task_instance_to_labels(task_instance)
if try_number is not None:
ti_labels[self.LABEL_TRY_NUMBER] = str(try_number)
else:
del ti_labels[self.LABEL_TRY_NUMBER]
log_filter = self._prepare_log_filter(ti_labels)
next_page_token = metadata.get("next_page_token", None)
all_pages = "download_logs" in metadata and metadata["download_logs"]
messages, end_of_log, next_page_token = self._read_logs(log_filter, next_page_token, all_pages)
new_metadata: dict[str, str | bool] = {"end_of_log": end_of_log}
if next_page_token:
new_metadata["next_page_token"] = next_page_token
return [((self.task_instance_hostname, messages),)], [new_metadata]
def _prepare_log_filter(self, ti_labels: dict[str, str]) -> str:
"""
Prepare the filter that chooses which log entries to fetch.
More information:
https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/list#body.request_body.FIELDS.filter
https://cloud.google.com/logging/docs/view/advanced-queries
:param ti_labels: Task Instance's labels that will be used to search for logs
:return: logs filter
"""
def escape_label_key(key: str) -> str:
return f'"{key}"' if "." in key else key
def escale_label_value(value: str) -> str:
escaped_value = value.replace("\\", "\\\\").replace('"', '\\"')
return f'"{escaped_value}"'
_, project = self._credentials_and_project
log_filters = [
f"resource.type={escale_label_value(self.resource.type)}",
f'logName="projects/{project}/logs/{self.gcp_log_name}"',
]
for key, value in self.resource.labels.items():
log_filters.append(f"resource.labels.{escape_label_key(key)}={escale_label_value(value)}")
for key, value in ti_labels.items():
log_filters.append(f"labels.{escape_label_key(key)}={escale_label_value(value)}")
return "\n".join(log_filters)
def _read_logs(
self, log_filter: str, next_page_token: str | None, all_pages: bool
) -> tuple[str, bool, str | None]:
"""
Send requests to the Stackdriver service and downloads logs.
:param log_filter: Filter specifying the logs to be downloaded.
:param next_page_token: The token of the page from which the log download will start.
If None is passed, it will start from the first page.
:param all_pages: If True is passed, all subpages will be downloaded. Otherwise, only the first
page will be downloaded
:return: A token that contains the following items:
* string with logs
* Boolean value describing whether there are more logs,
* token of the next page
"""
messages = []
new_messages, next_page_token = self._read_single_logs_page(
log_filter=log_filter,
page_token=next_page_token,
)
messages.append(new_messages)
if all_pages:
while next_page_token:
new_messages, next_page_token = self._read_single_logs_page(
log_filter=log_filter, page_token=next_page_token
)
messages.append(new_messages)
if not messages:
break
end_of_log = True
next_page_token = None
else:
end_of_log = not bool(next_page_token)
return "\n".join(messages), end_of_log, next_page_token
def _read_single_logs_page(self, log_filter: str, page_token: str | None = None) -> tuple[str, str]:
"""
Send requests to the Stackdriver service and downloads single pages with logs.
:param log_filter: Filter specifying the logs to be downloaded.
:param page_token: The token of the page to be downloaded. If None is passed, the first page will be
downloaded.
:return: Downloaded logs and next page token
"""
_, project = self._credentials_and_project
request = ListLogEntriesRequest(
resource_names=[f"projects/{project}"],
filter=log_filter,
page_token=page_token,
order_by="timestamp asc",
page_size=1000,
)
response = self._logging_service_client.list_log_entries(request=request)
page: ListLogEntriesResponse = next(response.pages)
messages: list[str] = []
for entry in page.entries:
if "message" in (entry.json_payload or {}):
messages.append(entry.json_payload["message"]) # type: ignore
elif entry.text_payload:
messages.append(entry.text_payload)
return "\n".join(messages), page.next_page_token
@classmethod
def _task_instance_to_labels(cls, ti: TaskInstance) -> dict[str, str]:
return {
cls.LABEL_TASK_ID: ti.task_id,
cls.LABEL_DAG_ID: ti.dag_id,
cls.LABEL_LOGICAL_DATE: str(ti.logical_date.isoformat())
if AIRFLOW_V_3_0_PLUS
else str(ti.execution_date.isoformat()),
cls.LABEL_TRY_NUMBER: str(ti.try_number),
}
@property
def log_name(self):
"""Return log name."""
return self.LOG_NAME
@cached_property
def _resource_path(self):
segments = [self.resource.type]
for key, value in self.resource.labels:
segments += [key]
segments += [value]
return "/".join(segments)
def get_external_log_url(self, task_instance: TaskInstance, try_number: int) -> str:
"""
Create an address for an external log collecting service.
:param task_instance: task instance object
:param try_number: task instance try_number to read logs from
:return: URL to the external log collection service
"""
_, project_id = self._credentials_and_project
ti_labels = self._task_instance_to_labels(task_instance)
ti_labels[self.LABEL_TRY_NUMBER] = str(try_number)
log_filter = self._prepare_log_filter(ti_labels)
url_query_string = {
"project": project_id,
"interval": "NO_LIMIT",
"resource": self._resource_path,
"advancedFilter": log_filter,
}
url = f"{self.LOG_VIEWER_BASE_URL}?{urlencode(url_query_string)}"
return url
def close(self) -> None:
self._transport.flush()
| StackdriverTaskHandler |
python | great-expectations__great_expectations | great_expectations/expectations/core/expect_column_value_z_scores_to_be_less_than.py | {
"start": 2113,
"end": 12605
} | class ____(ColumnMapExpectation):
__doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION}
ExpectColumnValueZScoresToBeLessThan is a \
Column Map Expectation \
for typed-column backends, and also for PandasExecutionEngine where the column \
dtype and provided type_ are unambiguous constraints \
(any dtype except 'object' or dtype of 'object' with type_ specified as 'object').
Column Map Expectations are one of the most common types of Expectation.
They are evaluated for a single column and ask a yes/no question for every row in that column.
Based on the result, they then calculate the percentage of rows that gave a positive answer. If the percentage is high enough, the Expectation considers that data valid.
Args:
column (str): \
{COLUMN_DESCRIPTION}
threshold (number): \
{THRESHOLD_DESCRIPTION}
double_sided (boolean): \
{DOUBLE_SIDED_DESCRIPTION}
Other Parameters:
mostly (None or a float between 0 and 1): \
{MOSTLY_DESCRIPTION} \
For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly). Default 1.
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
severity (str or None): \
{FAILURE_SEVERITY_DESCRIPTION} \
For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
Supported Data Sources:
[{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[11]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[12]}](https://docs.greatexpectations.io/docs/application_integration_support/)
Data Quality Issues:
{DATA_QUALITY_ISSUES[0]}
Example Data:
test test2
0 1 -100000000000
1 1 -1
2 1 0
3 3 1
4 3 1
Code Examples:
Passing Case:
Input:
ExpectColumnValueZScoresToBeLessThan(
column="test",
threshold=1.96,
double_sided=True
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"element_count": 5,
"unexpected_count": 0,
"unexpected_percent": 0.0,
"partial_unexpected_list": [],
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_percent_total": 0.0,
"unexpected_percent_nonmissing": 0.0
}},
"meta": {{}},
"success": true
}}
Failing Case:
Input:
ExpectColumnValueZScoresToBeLessThan(
column="test2",
threshold=1,
double_sided=True
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"element_count": 5,
"unexpected_count": 1,
"unexpected_percent": 20.0,
"partial_unexpected_list": [
-100000000000
],
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_percent_total": 20.0,
"unexpected_percent_nonmissing": 20.0
}},
"meta": {{}},
"success": false
}}
""" # noqa: E501 # FIXME CoP
threshold: Union[float, SuiteParameterDict] = pydantic.Field(description=THRESHOLD_DESCRIPTION)
double_sided: Union[bool, SuiteParameterDict] = pydantic.Field(
description=DOUBLE_SIDED_DESCRIPTION
)
domain_keys: ClassVar[Tuple[str, ...]] = (
"column",
"row_condition",
"condition_parser",
)
# This dictionary contains metadata for display in the public gallery
library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = {
"maturity": "production",
"tags": ["core expectation", "column map expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
_library_metadata = library_metadata
# Setting necessary computation metric dependencies and defining kwargs, as well as assigning kwargs default values\ # noqa: E501 # FIXME CoP
map_metric = "column_values.z_score.under_threshold"
success_keys = ("threshold", "double_sided", "mostly")
args_keys = ("column", "threshold")
class Config:
title = "Expect column value z-scores to be less than"
@staticmethod
def schema_extra(
schema: Dict[str, Any], model: Type[ExpectColumnValueZScoresToBeLessThan]
) -> None:
ColumnMapExpectation.Config.schema_extra(schema, model)
schema["properties"]["metadata"]["properties"].update(
{
"data_quality_issues": {
"title": "Data Quality Issues",
"type": "array",
"const": DATA_QUALITY_ISSUES,
},
"library_metadata": {
"title": "Library Metadata",
"type": "object",
"const": model._library_metadata,
},
"short_description": {
"title": "Short Description",
"type": "string",
"const": EXPECTATION_SHORT_DESCRIPTION,
},
"supported_data_sources": {
"title": "Supported Data Sources",
"type": "array",
"const": SUPPORTED_DATA_SOURCES,
},
}
)
@override
@classmethod
def _prescriptive_template(
cls,
renderer_configuration: RendererConfiguration,
) -> RendererConfiguration:
add_param_args: AddParamArgs = (
("column", RendererValueType.STRING),
("threshold", RendererValueType.NUMBER),
("mostly", RendererValueType.NUMBER),
)
for name, param_type in add_param_args:
renderer_configuration.add_param(name=name, param_type=param_type)
params = renderer_configuration.params
if renderer_configuration.include_column_name:
template_str = "$column value z-scores must be "
else:
template_str = "Value z-scores must be "
if renderer_configuration.kwargs.get("double_sided") is True:
inverse_threshold = params.threshold.value * -1
renderer_configuration.add_param(
name="inverse_threshold",
param_type=RendererValueType.NUMBER,
value=inverse_threshold,
)
if inverse_threshold < params.threshold.value:
template_str += "greater than $inverse_threshold and less than $threshold"
else:
template_str += "greater than $threshold and less than $inverse_threshold"
else:
template_str += "less than $threshold"
if params.mostly and params.mostly.value < 1.0:
renderer_configuration = cls._add_mostly_pct_param(
renderer_configuration=renderer_configuration
)
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
renderer_configuration.template_str = template_str
return renderer_configuration
| ExpectColumnValueZScoresToBeLessThan |
python | keras-team__keras | keras/src/callbacks/remote_monitor_test.py | {
"start": 373,
"end": 3900
} | class ____(testing.TestCase):
def test_RemoteMonitor(self):
if requests is None:
self.skipTest("`requests` required to run this test")
monitor = callbacks.RemoteMonitor()
# This will raise a warning since the default address in unreachable:
warning_msg = "Could not reach RemoteMonitor root server"
with warnings.catch_warnings(record=True) as warning_logs:
warnings.simplefilter("always")
monitor.on_epoch_end(0, logs={"loss": 0.0})
self.assertIn(warning_msg, str(warning_logs[-1].message))
def test_RemoteMonitor_np_array(self):
if requests is None:
self.skipTest("`requests` required to run this test")
with mock.patch("requests.post") as requests_post:
monitor = callbacks.RemoteMonitor(send_as_json=True)
a = np.arange(1) # a 1 by 1 array
logs = {"loss": 0.0, "val": a}
monitor.on_epoch_end(0, logs=logs)
send = {"loss": 0.0, "epoch": 0, "val": 0}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers
)
def test_RemoteMonitor_np_float32(self):
if requests is None:
self.skipTest("`requests` required to run this test")
with mock.patch("requests.post") as requests_post:
monitor = callbacks.RemoteMonitor(send_as_json=True)
a = np.float32(1.0) # a float32 generic type
logs = {"loss": 0.0, "val": a}
monitor.on_epoch_end(0, logs=logs)
send = {"loss": 0.0, "epoch": 0, "val": 1.0}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers
)
@skip_if_backend(
"openvino", "openvino backend does not support `fit` method"
)
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest("`requests` required to run this test")
if backend.backend() == "numpy":
self.skipTest("Trainer not implemented from NumPy backend.")
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
INPUT_DIM = 3
NUM_CLASSES = 2
BATCH_SIZE = 4
np.random.seed(1337)
x_train = np.random.random((TRAIN_SAMPLES, INPUT_DIM))
y_train = np.random.choice(np.arange(NUM_CLASSES), size=TRAIN_SAMPLES)
x_test = np.random.random((TEST_SAMPLES, INPUT_DIM))
y_test = np.random.choice(np.arange(NUM_CLASSES), size=TEST_SAMPLES)
y_test = numerical_utils.to_categorical(y_test)
y_train = numerical_utils.to_categorical(y_train)
model = Sequential([layers.Dense(NUM_CLASSES)])
model.compile(loss="mean_squared_error", optimizer="sgd")
with mock.patch("requests.post") as requests_post:
monitor = callbacks.RemoteMonitor(send_as_json=True)
hist = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=[monitor],
epochs=1,
)
send = {
"epoch": 0,
"loss": hist.history["loss"][0],
"val_loss": hist.history["val_loss"][0],
}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers
)
| TerminateOnNaNTest |
python | getsentry__sentry | src/sentry/integrations/bitbucket_server/repository.py | {
"start": 539,
"end": 6171
} | class ____(IntegrationRepositoryProvider):
name = "Bitbucket Server"
repo_provider = IntegrationProviderSlug.BITBUCKET_SERVER.value
def get_repository_data(self, organization, config):
installation = self.get_installation(config.get("installation"), organization.id)
client = installation.get_client()
try:
project, repo = config["identifier"].split("/", 1)
repo = client.get_repo(project, repo)
except Exception as e:
installation.raise_error(e)
else:
config["external_id"] = str(repo["id"])
config["name"] = repo["project"]["key"] + "/" + repo["name"]
config["project"] = repo["project"]["key"]
config["repo"] = repo["name"]
return config
def build_repository_config(
self, organization: RpcOrganization, data: dict[str, Any]
) -> RepositoryConfig:
installation = self.get_installation(data.get("installation"), organization.id)
client = installation.get_client()
try:
resp = client.create_hook(
data["project"],
data["repo"],
{
"name": "sentry-bitbucket-server-repo-hook",
"url": absolute_uri(
reverse(
"sentry-extensions-bitbucketserver-webhook",
kwargs={
"organization_id": organization.id,
"integration_id": data.get("installation"),
},
)
),
"active": True,
"events": ["repo:refs_changed", "pr:merged"],
},
)
except Exception as e:
installation.raise_error(e)
else:
return {
"name": data["identifier"],
"external_id": data["external_id"],
"url": installation.model.metadata["base_url"]
+ "/projects/{project}/repos/{repo}/browse".format(
project=data["project"], repo=data["repo"]
),
"config": {
"name": data["identifier"],
"project": data["project"],
"repo": data["repo"],
"webhook_id": resp["id"],
},
"integration_id": data["installation"],
}
def on_delete_repository(self, repo):
installation = self.get_installation(repo.integration_id, repo.organization_id)
client = installation.get_client()
try:
client.delete_hook(
repo.config["project"], repo.config["repo"], repo.config["webhook_id"]
)
except ApiError as exc:
if exc.code == 404:
return
raise
def _format_commits(self, client, repo, commit_list):
return [
{
"id": c["id"],
"repository": repo.name,
"author_email": c["author"]["emailAddress"],
"author_name": c["author"].get("displayName", c["author"]["name"]),
"message": c["message"],
"timestamp": datetime.fromtimestamp(c["authorTimestamp"] / 1000, timezone.utc),
"patch_set": self._get_patchset(
client, repo.config["project"], repo.config["repo"], c["id"]
),
}
for c in commit_list
]
def compare_commits(self, repo, start_sha, end_sha):
installation = self.get_installation(repo.integration_id, repo.organization_id)
client = installation.get_client()
try:
if "0" * 40 == start_sha or start_sha is None:
commit_list = client.get_last_commits(repo.config["project"], repo.config["repo"])
else:
commit_list = client.get_commits(
repo.config["project"], repo.config["repo"], start_sha, end_sha
)
return self._format_commits(client, repo, commit_list)
except Exception as e:
installation.raise_error(e)
def repository_external_slug(self, repo):
return repo.name
def _get_patchset(self, client, project, repo, sha):
"""
Get the modified files for a commit
"""
key = f"get_changelist:{md5_text(project + repo).hexdigest()}:{sha}"
commit_files = cache.get(key)
if commit_files is None:
commit_files = client.get_commit_filechanges(project, repo, sha)
cache.set(key, commit_files, 900)
return self._transform_patchset(commit_files)
def _transform_patchset(self, values):
"""Convert the patch data from Bitbucket into our internal format
See sentry.models.Release.set_commits
"""
changes = []
for change in values:
if change["type"] == "MODIFY":
changes.append({"path": change["path"]["toString"], "type": "M"})
if change["type"] == "ADD":
changes.append({"path": change["path"]["toString"], "type": "A"})
if change["type"] == "DELETE":
changes.append({"path": change["path"]["toString"], "type": "D"})
if change["type"] == "MOVE":
changes.append({"path": change["srcPath"]["toString"], "type": "D"})
changes.append({"path": change["path"]["toString"], "type": "A"})
return changes
| BitbucketServerRepositoryProvider |
python | vyperlang__vyper | vyper/ast/nodes.py | {
"start": 20829,
"end": 21110
} | class ____(VyperNode):
"""
Inherited class for Module and FunctionDef nodes.
Class attributes
----------------
doc_string : Expr
Expression node representing the docstring within this node.
"""
__slots__ = ("body", "name", "doc_string")
| TopLevel |
python | rapidsai__cudf | python/cudf/cudf/core/column/column.py | {
"start": 4204,
"end": 5013
} | class ____:
# A wrapper that exposes the __cuda_array_interface__ of a buffer as read-only to
# avoid copy-on-write issues.
def __init__(self, buffer: Buffer, mode: Literal["read", "write"]) -> None:
self._buffer = buffer
self._mode = mode
@property
def owner(self) -> Buffer:
# This property is how get_buffer_owner in buffer/utils.py knows to access the
# owner transitively, which is needed for correctness with copy-on-write
return self._buffer
@property
def __cuda_array_interface__(self) -> Mapping:
return {
"data": (self._buffer.get_ptr(mode=self._mode), False),
"shape": (self._buffer.size,),
"strides": None,
"typestr": "|u1",
"version": 0,
}
| ROCAIWrapper |
python | Pylons__pyramid | tests/test_path.py | {
"start": 3972,
"end": 4527
} | class ____(unittest.TestCase):
def _callFUT(self, package):
from pyramid.path import package_of
return package_of(package)
def test_it_package(self):
import tests
package = DummyPackageOrModule(tests)
result = self._callFUT(package)
self.assertEqual(result, tests)
def test_it_module(self):
import tests
import tests.test_path
package = DummyPackageOrModule(tests.test_path)
result = self._callFUT(package)
self.assertEqual(result, tests)
| TestPackageOf |
python | altair-viz__altair | altair/utils/selection.py | {
"start": 2833,
"end": 3928
} | class ____:
"""
Represents the state of an alt.selection_interval().
The value field is a dict of the form:
{"dim1": [0, 10], "dim2": ["A", "BB", "CCC"]}
where "dim1" and "dim2" are dataset columns and the dict values
correspond to the selected range.
"""
name: str
value: dict[str, list]
store: Store
@staticmethod
def from_vega(name: str, signal: dict[str, list] | None, store: Store):
"""
Construct an IntervalSelection from the raw Vega signal and dataset values.
Parameters
----------
name: str
The selection's name
signal: dict or None
The value of the Vega signal corresponding to the selection
store: list
The value of the Vega dataset corresponding to the selection.
This dataset is named "{name}_store" in the Vega view.
Returns
-------
PointSelection
"""
if signal is None:
signal = {}
return IntervalSelection(name=name, value=signal, store=store)
| IntervalSelection |
python | pytorch__pytorch | torch/distributed/checkpoint/filesystem.py | {
"start": 2611,
"end": 3299
} | class ____(_TensorLoader):
def __init__(self, resolve_fun: Callable) -> None:
self.resolve_fun = resolve_fun
self.items: list[tuple[int, object]] = []
def add(self, size: int, obj: object) -> None:
self.items.append((size, obj))
def start_loading(self) -> None:
pass
def values(self) -> Iterator[tuple[torch.Tensor, object]]:
for _, obj in self.items:
tensor = self.resolve_fun(obj).detach()
tensor = tensor.cpu()
if tensor.storage().size() != tensor.numel():
tensor = tensor.clone()
yield (
tensor,
obj,
)
| _SerialCpuLoader |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/attributes.py | {
"start": 27768,
"end": 39995
} | class ____:
"""internal implementation for instrumented attributes."""
collection: bool
default_accepts_scalar_loader: bool
uses_objects: bool
supports_population: bool
dynamic: bool
_is_has_collection_adapter = False
_replace_token: AttributeEventToken
_remove_token: AttributeEventToken
_append_token: AttributeEventToken
def __init__(
self,
class_: _ExternalEntityType[_O],
key: str,
callable_: Optional[_LoaderCallable],
dispatch: _Dispatch[QueryableAttribute[Any]],
trackparent: bool = False,
compare_function: Optional[Callable[..., bool]] = None,
active_history: bool = False,
parent_token: Optional[AttributeEventToken] = None,
load_on_unexpire: bool = True,
send_modified_events: bool = True,
accepts_scalar_loader: Optional[bool] = None,
**kwargs: Any,
):
r"""Construct an AttributeImpl.
:param \class_: associated class
:param key: string name of the attribute
:param \callable_:
optional function which generates a callable based on a parent
instance, which produces the "default" values for a scalar or
collection attribute when it's first accessed, if not present
already.
:param trackparent:
if True, attempt to track if an instance has a parent attached
to it via this attribute.
:param compare_function:
a function that compares two values which are normally
assignable to this attribute.
:param active_history:
indicates that get_history() should always return the "old" value,
even if it means executing a lazy callable upon attribute change.
:param parent_token:
Usually references the MapperProperty, used as a key for
the hasparent() function to identify an "owning" attribute.
Allows multiple AttributeImpls to all match a single
owner attribute.
:param load_on_unexpire:
if False, don't include this attribute in a load-on-expired
operation, i.e. the "expired_attribute_loader" process.
The attribute can still be in the "expired" list and be
considered to be "expired". Previously, this flag was called
"expire_missing" and is only used by a deferred column
attribute.
:param send_modified_events:
if False, the InstanceState._modified_event method will have no
effect; this means the attribute will never show up as changed in a
history entry.
"""
self.class_ = class_
self.key = key
self.callable_ = callable_
self.dispatch = dispatch
self.trackparent = trackparent
self.parent_token = parent_token or self
self.send_modified_events = send_modified_events
if compare_function is None:
self.is_equal = operator.eq
else:
self.is_equal = compare_function
if accepts_scalar_loader is not None:
self.accepts_scalar_loader = accepts_scalar_loader
else:
self.accepts_scalar_loader = self.default_accepts_scalar_loader
_deferred_history = kwargs.pop("_deferred_history", False)
self._deferred_history = _deferred_history
if active_history:
self.dispatch._active_history = True
self.load_on_unexpire = load_on_unexpire
self._modified_token = AttributeEventToken(self, OP_MODIFIED)
__slots__ = (
"class_",
"key",
"callable_",
"dispatch",
"trackparent",
"parent_token",
"send_modified_events",
"is_equal",
"load_on_unexpire",
"_modified_token",
"accepts_scalar_loader",
"_deferred_history",
)
def __str__(self) -> str:
return f"{self.class_.__name__}.{self.key}"
def _get_active_history(self):
"""Backwards compat for impl.active_history"""
return self.dispatch._active_history
def _set_active_history(self, value):
self.dispatch._active_history = value
active_history = property(_get_active_history, _set_active_history)
def hasparent(
self, state: InstanceState[Any], optimistic: bool = False
) -> bool:
"""Return the boolean value of a `hasparent` flag attached to
the given state.
The `optimistic` flag determines what the default return value
should be if no `hasparent` flag can be located.
As this function is used to determine if an instance is an
*orphan*, instances that were loaded from storage should be
assumed to not be orphans, until a True/False value for this
flag is set.
An instance attribute that is loaded by a callable function
will also not have a `hasparent` flag.
"""
msg = "This AttributeImpl is not configured to track parents."
assert self.trackparent, msg
return (
state.parents.get(id(self.parent_token), optimistic) is not False
)
def sethasparent(
self,
state: InstanceState[Any],
parent_state: InstanceState[Any],
value: bool,
) -> None:
"""Set a boolean flag on the given item corresponding to
whether or not it is attached to a parent object via the
attribute represented by this ``InstrumentedAttribute``.
"""
msg = "This AttributeImpl is not configured to track parents."
assert self.trackparent, msg
id_ = id(self.parent_token)
if value:
state.parents[id_] = parent_state
else:
if id_ in state.parents:
last_parent = state.parents[id_]
if (
last_parent is not False
and last_parent.key != parent_state.key
):
if last_parent.obj() is None:
raise orm_exc.StaleDataError(
"Removing state %s from parent "
"state %s along attribute '%s', "
"but the parent record "
"has gone stale, can't be sure this "
"is the most recent parent."
% (
state_str(state),
state_str(parent_state),
self.key,
)
)
return
state.parents[id_] = False
def get_history(
self,
state: InstanceState[Any],
dict_: _InstanceDict,
passive: PassiveFlag = PASSIVE_OFF,
) -> History:
raise NotImplementedError()
def get_all_pending(
self,
state: InstanceState[Any],
dict_: _InstanceDict,
passive: PassiveFlag = PASSIVE_NO_INITIALIZE,
) -> _AllPendingType:
"""Return a list of tuples of (state, obj)
for all objects in this attribute's current state
+ history.
Only applies to object-based attributes.
This is an inlining of existing functionality
which roughly corresponds to:
get_state_history(
state,
key,
passive=PASSIVE_NO_INITIALIZE).sum()
"""
raise NotImplementedError()
def _default_value(
self, state: InstanceState[Any], dict_: _InstanceDict
) -> Any:
"""Produce an empty value for an uninitialized attribute."""
raise NotImplementedError()
def get(
self,
state: InstanceState[Any],
dict_: _InstanceDict,
passive: PassiveFlag = PASSIVE_OFF,
) -> Any:
"""Retrieve a value from the given object.
If a callable is assembled on this object's attribute, and
passive is False, the callable will be executed and the
resulting value will be set as the new value for this attribute.
"""
if self.key in dict_:
return dict_[self.key]
else:
# if history present, don't load
key = self.key
if (
key not in state.committed_state
or state.committed_state[key] is NO_VALUE
):
if not passive & CALLABLES_OK:
return PASSIVE_NO_RESULT
value = self._fire_loader_callables(state, key, passive)
if value is PASSIVE_NO_RESULT or value is NO_VALUE:
return value
elif value is ATTR_WAS_SET:
try:
return dict_[key]
except KeyError as err:
# TODO: no test coverage here.
raise KeyError(
"Deferred loader for attribute "
"%r failed to populate "
"correctly" % key
) from err
elif value is not ATTR_EMPTY:
return self.set_committed_value(state, dict_, value)
if not passive & INIT_OK:
return NO_VALUE
else:
return self._default_value(state, dict_)
def _fire_loader_callables(
self, state: InstanceState[Any], key: str, passive: PassiveFlag
) -> Any:
if (
self.accepts_scalar_loader
and self.load_on_unexpire
and key in state.expired_attributes
):
return state._load_expired(state, passive)
elif key in state.callables:
callable_ = state.callables[key]
return callable_(state, passive)
elif self.callable_:
return self.callable_(state, passive)
else:
return ATTR_EMPTY
def append(
self,
state: InstanceState[Any],
dict_: _InstanceDict,
value: Any,
initiator: Optional[AttributeEventToken],
passive: PassiveFlag = PASSIVE_OFF,
) -> None:
self.set(state, dict_, value, initiator, passive=passive)
def remove(
self,
state: InstanceState[Any],
dict_: _InstanceDict,
value: Any,
initiator: Optional[AttributeEventToken],
passive: PassiveFlag = PASSIVE_OFF,
) -> None:
self.set(
state, dict_, None, initiator, passive=passive, check_old=value
)
def pop(
self,
state: InstanceState[Any],
dict_: _InstanceDict,
value: Any,
initiator: Optional[AttributeEventToken],
passive: PassiveFlag = PASSIVE_OFF,
) -> None:
self.set(
state,
dict_,
None,
initiator,
passive=passive,
check_old=value,
pop=True,
)
def set(
self,
state: InstanceState[Any],
dict_: _InstanceDict,
value: Any,
initiator: Optional[AttributeEventToken] = None,
passive: PassiveFlag = PASSIVE_OFF,
check_old: Any = None,
pop: bool = False,
) -> None:
raise NotImplementedError()
def delete(self, state: InstanceState[Any], dict_: _InstanceDict) -> None:
raise NotImplementedError()
def get_committed_value(
self,
state: InstanceState[Any],
dict_: _InstanceDict,
passive: PassiveFlag = PASSIVE_OFF,
) -> Any:
"""return the unchanged value of this attribute"""
if self.key in state.committed_state:
value = state.committed_state[self.key]
if value is NO_VALUE:
return None
else:
return value
else:
return self.get(state, dict_, passive=passive)
def set_committed_value(self, state, dict_, value):
"""set an attribute value on the given instance and 'commit' it."""
dict_[self.key] = value
state._commit(dict_, [self.key])
return value
| _AttributeImpl |
python | wandb__wandb | wandb/apis/importers/wandb.py | {
"start": 1957,
"end": 2738
} | class ____:
artifacts: Iterable[wandb.Artifact]
entity: str
project: str
type_: str
name: str
def __iter__(self) -> Iterator:
return iter(self.artifacts)
def __repr__(self) -> str:
return f"ArtifactSequence({self.identifier})"
@property
def identifier(self) -> str:
return "/".join([self.entity, self.project, self.type_, self.name])
@classmethod
def from_collection(cls, collection: ArtifactCollection):
    """Build an ArtifactSequence from a collection, ordered by version.

    Versions are sorted numerically (``"v3"`` -> 3) rather than
    lexically, so ``v10`` follows ``v9``.
    """
    def version_number(artifact):
        # Strip the leading "v" and compare as integers.
        return int(artifact.version.lstrip("v"))

    ordered = sorted(collection.artifacts(), key=version_number)
    return ArtifactSequence(
        ordered,
        collection.entity,
        collection.project,
        collection.type,
        collection.name,
    )
| ArtifactSequence |
python | conda__conda | conda/gateways/repodata/__init__.py | {
"start": 1819,
"end": 2022
} | class ____(UnavailableInvalidChannel):
"""
Subclass used to determine when empty repodata should be cached, e.g. for a
channel that doesn't provide current_repodata.json
"""
| RepodataIsEmpty |
python | altair-viz__altair | altair/vegalite/v6/schema/mixins.py | {
"start": 620,
"end": 42104
} | class ____(SchemaBase):
"""
MarkDef schema wrapper.
Parameters
----------
align : dict, :class:`Align`, :class:`ExprRef`, Literal['left', 'center', 'right']
The horizontal alignment of the text or ranged marks (area, bar, image, rect, rule).
One of ``"left"``, ``"right"``, ``"center"``.
**Note:** Expression reference is *not* supported for range marks.
angle : dict, float, :class:`ExprRef`
The rotation angle of the text, in degrees.
aria : bool, dict, :class:`ExprRef`
A boolean flag indicating if `ARIA attributes
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ should be
included (SVG output only). If ``false``, the "aria-hidden" attribute will be set on
the output SVG element, removing the mark item from the ARIA accessibility tree.
ariaRole : str, dict, :class:`ExprRef`
Sets the type of user interface element of the mark item for `ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the "role" attribute. Warning: this
property is experimental and may be changed in the future.
ariaRoleDescription : str, dict, :class:`ExprRef`
A human-readable, author-localized description for the role of the mark item for
`ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the "aria-roledescription" attribute.
Warning: this property is experimental and may be changed in the future.
aspect : bool, dict, :class:`ExprRef`
Whether to keep aspect ratio of image marks.
bandSize : float
The width of the ticks.
**Default value:** 3/4 of step (width step for horizontal ticks and height step for
vertical ticks).
baseline : dict, :class:`ExprRef`, :class:`Baseline`, :class:`TextBaseline`, Literal['alphabetic', 'line-bottom', 'line-top', 'top', 'middle', 'bottom']
For text marks, the vertical text baseline. One of ``"alphabetic"`` (default),
``"top"``, ``"middle"``, ``"bottom"``, ``"line-top"``, ``"line-bottom"``, or an
expression reference that provides one of the valid values. The ``"line-top"`` and
``"line-bottom"`` values operate similarly to ``"top"`` and ``"bottom"``, but are
calculated relative to the ``lineHeight`` rather than ``fontSize`` alone.
For range marks, the vertical alignment of the marks. One of ``"top"``,
``"middle"``, ``"bottom"``.
**Note:** Expression reference is *not* supported for range marks.
binSpacing : float
Offset between bars for binned field. The ideal value for this is either 0
(preferred by statisticians) or 1 (Vega-Lite default, D3 example style).
**Default value:** ``1``
blend : dict, :class:`Blend`, :class:`ExprRef`, Literal[None, 'multiply', 'screen', 'overlay', 'darken', 'lighten', 'color-dodge', 'color-burn', 'hard-light', 'soft-light', 'difference', 'exclusion', 'hue', 'saturation', 'color', 'luminosity']
The color blend mode for drawing an item on its current background. Any valid `CSS
mix-blend-mode <https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode>`__
value can be used.
**Default value:** ``"source-over"``
clip : bool, dict, :class:`ExprRef`
Whether a mark be clipped to the enclosing group's width and height.
color : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 
'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple']
Default color.
**Default value:** ``"#4682b4"``
**Note:**
* This property cannot be used in a `style config
<https://vega.github.io/vega-lite/docs/mark.html#style-config>`__.
* The ``fill`` and ``stroke`` properties have higher precedence than ``color`` and
will override ``color``.
continuousBandSize : float
The default size of the bars on continuous scales.
**Default value:** ``5``
cornerRadius : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles or arcs' corners.
**Default value:** ``0``
cornerRadiusBottomLeft : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles' bottom left corner.
**Default value:** ``0``
cornerRadiusBottomRight : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles' bottom right corner.
**Default value:** ``0``
cornerRadiusEnd : dict, float, :class:`ExprRef`
* For vertical bars, top-left and top-right corner radius.
* For horizontal bars, top-right and bottom-right corner radius.
cornerRadiusTopLeft : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles' top right corner.
**Default value:** ``0``
cornerRadiusTopRight : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles' top left corner.
**Default value:** ``0``
cursor : dict, :class:`Cursor`, :class:`ExprRef`, Literal['auto', 'default', 'none', 'context-menu', 'help', 'pointer', 'progress', 'wait', 'cell', 'crosshair', 'text', 'vertical-text', 'alias', 'copy', 'move', 'no-drop', 'not-allowed', 'e-resize', 'n-resize', 'ne-resize', 'nw-resize', 's-resize', 'se-resize', 'sw-resize', 'w-resize', 'ew-resize', 'ns-resize', 'nesw-resize', 'nwse-resize', 'col-resize', 'row-resize', 'all-scroll', 'zoom-in', 'zoom-out', 'grab', 'grabbing']
The mouse cursor used over the mark. Any valid `CSS cursor type
<https://developer.mozilla.org/en-US/docs/Web/CSS/cursor#Values>`__ can be used.
description : str, dict, :class:`ExprRef`
A text description of the mark item for `ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the `"aria-label" attribute
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA/ARIA_Techniques/Using_the_aria-label_attribute>`__.
dir : dict, :class:`ExprRef`, :class:`TextDirection`, Literal['ltr', 'rtl']
The direction of the text. One of ``"ltr"`` (left-to-right) or ``"rtl"``
(right-to-left). This property determines on which side is truncated in response to
the limit parameter.
**Default value:** ``"ltr"``
discreteBandSize : dict, float, :class:`RelativeBandSize`
The default size of the bars with discrete dimensions. If unspecified, the default
size is ``step-2``, which provides 2 pixel offset between bars.
dx : dict, float, :class:`ExprRef`
The horizontal offset, in pixels, between the text label and its anchor point. The
offset is applied after rotation by the *angle* property.
dy : dict, float, :class:`ExprRef`
The vertical offset, in pixels, between the text label and its anchor point. The
offset is applied after rotation by the *angle* property.
ellipsis : str, dict, :class:`ExprRef`
The ellipsis string for text truncated in response to the limit parameter.
**Default value:** ``"…"``
fill : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 
'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple'], None
Default fill color. This property has higher precedence than ``config.color``. Set
to ``null`` to remove fill.
**Default value:** (None)
fillOpacity : dict, float, :class:`ExprRef`
The fill opacity (value between [0,1]).
**Default value:** ``1``
filled : bool
Whether the mark's color should be used as fill color instead of stroke color.
**Default value:** ``false`` for all ``point``, ``line``, and ``rule`` marks as well
as ``geoshape`` marks for `graticule
<https://vega.github.io/vega-lite/docs/data.html#graticule>`__ data sources;
otherwise, ``true``.
**Note:** This property cannot be used in a `style config
<https://vega.github.io/vega-lite/docs/mark.html#style-config>`__.
font : str, dict, :class:`ExprRef`
The typeface to set the text in (e.g., ``"Helvetica Neue"``).
fontSize : dict, float, :class:`ExprRef`
The font size, in pixels.
**Default value:** ``11``
fontStyle : str, dict, :class:`ExprRef`, :class:`FontStyle`
The font style (e.g., ``"italic"``).
fontWeight : dict, :class:`ExprRef`, :class:`FontWeight`, Literal['normal', 'bold', 'lighter', 'bolder', 100, 200, 300, 400, 500, 600, 700, 800, 900]
The font weight. This can be either a string (e.g ``"bold"``, ``"normal"``) or a
number (``100``, ``200``, ``300``, ..., ``900`` where ``"normal"`` = ``400`` and
``"bold"`` = ``700``).
height : dict, float, :class:`ExprRef`, :class:`RelativeBandSize`
Height of the marks. One of:
* A number representing a fixed pixel height.
* A relative band size definition. For example, ``{band: 0.5}`` represents half of
the band
href : str, dict, :class:`URI`, :class:`ExprRef`
A URL to load upon mouse click. If defined, the mark acts as a hyperlink.
innerRadius : dict, float, :class:`ExprRef`
The inner radius in pixels of arc marks. ``innerRadius`` is an alias for
``radius2``.
**Default value:** ``0``
interpolate : dict, :class:`ExprRef`, :class:`Interpolate`, Literal['basis', 'basis-open', 'basis-closed', 'bundle', 'cardinal', 'cardinal-open', 'cardinal-closed', 'catmull-rom', 'linear', 'linear-closed', 'monotone', 'natural', 'step', 'step-before', 'step-after']
The line interpolation method to use for line and area marks. One of the following:
* ``"linear"``: piecewise linear segments, as in a polyline.
* ``"linear-closed"``: close the linear segments to form a polygon.
* ``"step"``: alternate between horizontal and vertical segments, as in a step
function.
* ``"step-before"``: alternate between vertical and horizontal segments, as in a
step function.
* ``"step-after"``: alternate between horizontal and vertical segments, as in a step
function.
* ``"basis"``: a B-spline, with control point duplication on the ends.
* ``"basis-open"``: an open B-spline; may not intersect the start or end.
* ``"basis-closed"``: a closed B-spline, as in a loop.
* ``"cardinal"``: a Cardinal spline, with control point duplication on the ends.
* ``"cardinal-open"``: an open Cardinal spline; may not intersect the start or end,
but will intersect other control points.
* ``"cardinal-closed"``: a closed Cardinal spline, as in a loop.
* ``"bundle"``: equivalent to basis, except the tension parameter is used to
straighten the spline.
* ``"monotone"``: cubic interpolation that preserves monotonicity in y.
invalid : :class:`MarkInvalidDataMode`, Literal['filter', 'break-paths-filter-domains', 'break-paths-show-domains', 'break-paths-show-path-domains', 'show'], None
Invalid data mode, which defines how the marks and corresponding scales should
represent invalid values (``null`` and ``NaN`` in continuous scales *without*
defined output for invalid values).
* ``"filter"`` — *Exclude* all invalid values from the visualization's *marks* and
*scales*. For path marks (for line, area, trail), this option will create paths
that connect valid points, as if the data rows with invalid values do not exist.
* ``"break-paths-filter-domains"`` — Break path marks (for line, area, trail) at
invalid values. For non-path marks, this is equivalent to ``"filter"``. All
*scale* domains will *exclude* these filtered data points.
* ``"break-paths-show-domains"`` — Break paths (for line, area, trail) at invalid
values. Hide invalid values for non-path marks. All *scale* domains will
*include* these filtered data points (for both path and non-path marks).
* ``"show"`` or ``null`` — Show all data points in the marks and scale domains. Each
scale will use the output for invalid values defined in ``config.scale.invalid``
or, if unspecified, by default invalid values will produce the same visual values
as zero (if the scale includes zero) or the minimum value (if the scale does not
include zero).
* ``"break-paths-show-path-domains"`` (default) — This is equivalent to
``"break-paths-show-domains"`` for path-based marks (line/area/trail) and
``"filter"`` for non-path marks.
**Note**: If any channel's scale has an output for invalid values defined in
``config.scale.invalid``, all values for the scales will be considered "valid" since
they can produce a reasonable output for the scales. Thus, fields for such channels
will not be filtered and will not cause path breaks.
limit : dict, float, :class:`ExprRef`
The maximum length of the text mark in pixels. The text value will be automatically
truncated if the rendered size exceeds the limit.
**Default value:** ``0`` -- indicating no limit
line : bool, dict, :class:`OverlayMarkDef`
A flag for overlaying line on top of area marks, or an object defining the
properties of the overlayed lines.
* If this value is an empty object (``{}``) or ``true``, lines with default
properties will be used.
* If this value is ``false``, no lines would be automatically added to area marks.
**Default value:** ``false``.
lineBreak : str, dict, :class:`ExprRef`
A delimiter, such as a newline character, upon which to break text strings into
multiple lines. This property is ignored if the text is array-valued.
lineHeight : dict, float, :class:`ExprRef`
The line height in pixels (the spacing between subsequent lines of text) for
multi-line text marks.
minBandSize : dict, float, :class:`ExprRef`
The minimum band size for bar and rectangle marks. **Default value:** ``0.25``
opacity : dict, float, :class:`ExprRef`
The overall opacity (value between [0,1]).
**Default value:** ``0.7`` for non-aggregate plots with ``point``, ``tick``,
``circle``, or ``square`` marks or layered ``bar`` charts and ``1`` otherwise.
order : bool, None
For line and trail marks, this ``order`` property can be set to ``null`` or
``false`` to make the lines use the original order in the data sources.
orient : :class:`Orientation`, Literal['horizontal', 'vertical']
The orientation of a non-stacked bar, tick, area, and line charts. The value is
either horizontal (default) or vertical.
* For bar, rule and tick, this determines whether the size of the bar and tick
should be applied to x or y dimension.
* For area, this property determines the orient property of the Vega output.
* For line and trail marks, this property determines the sort order of the points in
the line if ``config.sortLineBy`` is not specified. For stacked charts, this is
always determined by the orientation of the stack; therefore explicitly specified
value will be ignored.
outerRadius : dict, float, :class:`ExprRef`
The outer radius in pixels of arc marks. ``outerRadius`` is an alias for ``radius``.
**Default value:** ``0``
padAngle : dict, float, :class:`ExprRef`
The angular padding applied to sides of the arc, in radians.
point : bool, dict, Literal['transparent'], :class:`OverlayMarkDef`
A flag for overlaying points on top of line or area marks, or an object defining the
properties of the overlayed points.
* If this property is ``"transparent"``, transparent points will be used (for
enhancing tooltips and selections).
* If this property is an empty object (``{}``) or ``true``, filled points with
default properties will be used.
* If this property is ``false``, no points would be automatically added to line or
area marks.
**Default value:** ``false``.
radius : dict, float, :class:`ExprRef`
For arc mark, the primary (outer) radius in pixels.
For text marks, polar coordinate radial offset, in pixels, of the text from the
origin determined by the ``x`` and ``y`` properties.
**Default value:** ``min(plot_width, plot_height)/2``
radius2 : dict, float, :class:`ExprRef`
The secondary (inner) radius in pixels of arc marks.
**Default value:** ``0``
radius2Offset : dict, float, :class:`ExprRef`
Offset for radius2.
radiusOffset : dict, float, :class:`ExprRef`
Offset for radius.
shape : str, dict, :class:`ExprRef`, :class:`SymbolShape`
Shape of the point marks. Supported values include:
* plotting shapes: ``"circle"``, ``"square"``, ``"cross"``, ``"diamond"``,
``"triangle-up"``, ``"triangle-down"``, ``"triangle-right"``, or
``"triangle-left"``.
* the line symbol ``"stroke"``
* centered directional shapes ``"arrow"``, ``"wedge"``, or ``"triangle"``
* a custom `SVG path string
<https://developer.mozilla.org/en-US/docs/Web/SVG/Tutorial/Paths>`__ (For correct
sizing, custom shape paths should be defined within a square bounding box with
coordinates ranging from -1 to 1 along both the x and y dimensions.)
**Default value:** ``"circle"``
size : dict, float, :class:`ExprRef`
Default size for marks.
* For ``point``/``circle``/``square``, this represents the pixel area of the marks.
Note that this value sets the area of the symbol; the side lengths will increase
with the square root of this value.
* For ``bar``, this represents the band size of the bar, in pixels.
* For ``text``, this represents the font size, in pixels.
**Default value:**
* ``30`` for point, circle, square marks; width/height's ``step``
* ``2`` for bar marks with discrete dimensions;
* ``5`` for bar marks with continuous dimensions;
* ``11`` for text marks.
smooth : bool, dict, :class:`ExprRef`
A boolean flag (default true) indicating if the image should be smoothed when
resized. If false, individual pixels should be scaled directly rather than
interpolated with smoothing. For SVG rendering, this option may not work in some
browsers due to lack of standardization.
stroke : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 
'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple'], None
Default stroke color. This property has higher precedence than ``config.color``. Set
to ``null`` to remove stroke.
**Default value:** (None)
strokeCap : dict, :class:`ExprRef`, :class:`StrokeCap`, Literal['butt', 'round', 'square']
The stroke cap for line ending style. One of ``"butt"``, ``"round"``, or
``"square"``.
**Default value:** ``"butt"``
strokeDash : dict, Sequence[float], :class:`ExprRef`
An array of alternating stroke, space lengths for creating dashed or dotted lines.
strokeDashOffset : dict, float, :class:`ExprRef`
The offset (in pixels) into which to begin drawing with the stroke dash array.
strokeJoin : dict, :class:`ExprRef`, :class:`StrokeJoin`, Literal['miter', 'round', 'bevel']
The stroke line join method. One of ``"miter"``, ``"round"`` or ``"bevel"``.
**Default value:** ``"miter"``
strokeMiterLimit : dict, float, :class:`ExprRef`
The miter limit at which to bevel a line join.
strokeOffset : dict, float, :class:`ExprRef`
The offset in pixels at which to draw the group stroke and fill. If unspecified, the
default behavior is to dynamically offset stroked groups such that 1 pixel stroke
widths align with the pixel grid.
strokeOpacity : dict, float, :class:`ExprRef`
The stroke opacity (value between [0,1]).
**Default value:** ``1``
strokeWidth : dict, float, :class:`ExprRef`
The stroke width, in pixels.
style : str, Sequence[str]
A string or array of strings indicating the name of custom styles to apply to the
mark. A style is a named collection of mark property defaults defined within the
`style configuration
<https://vega.github.io/vega-lite/docs/mark.html#style-config>`__. If style is an
array, later styles will override earlier styles. Any `mark properties
<https://vega.github.io/vega-lite/docs/encoding.html#mark-prop>`__ explicitly
defined within the ``encoding`` will override a style default.
**Default value:** The mark's name. For example, a bar mark will have style
``"bar"`` by default. **Note:** Any specified style will augment the default style.
For example, a bar mark with ``"style": "foo"`` will receive from
``config.style.bar`` and ``config.style.foo`` (the specified style ``"foo"`` has
higher precedence).
tension : dict, float, :class:`ExprRef`
Depending on the interpolation type, sets the tension parameter (for line and area
marks).
text : str, dict, :class:`Text`, Sequence[str], :class:`ExprRef`
Placeholder text if the ``text`` channel is not specified
theta : dict, float, :class:`ExprRef`
* For arc marks, the arc length in radians if theta2 is not specified, otherwise the
start arc angle. (A value of 0 indicates up or “north”, increasing values proceed
clockwise.)
* For text marks, polar coordinate angle in radians.
theta2 : dict, float, :class:`ExprRef`
The end angle of arc marks in radians. A value of 0 indicates up or “north”,
increasing values proceed clockwise.
theta2Offset : dict, float, :class:`ExprRef`
Offset for theta2.
thetaOffset : dict, float, :class:`ExprRef`
Offset for theta.
thickness : float
Thickness of the tick mark.
**Default value:** ``1``
time : dict, float, :class:`ExprRef`
timeUnitBandPosition : float
Default relative band position for a time unit. If set to ``0``, the marks will be
positioned at the beginning of the time unit band step. If set to ``0.5``, the marks
will be positioned in the middle of the time unit band step.
timeUnitBandSize : float
Default relative band size for a time unit. If set to ``1``, the bandwidth of the
marks will be equal to the time unit band step. If set to ``0.5``, bandwidth of the
marks will be half of the time unit band step.
tooltip : str, bool, dict, float, :class:`ExprRef`, :class:`TooltipContent`, None
The tooltip text string to show upon mouse hover or an object defining which fields
should the tooltip be derived from.
* If ``tooltip`` is ``true`` or ``{"content": "encoding"}``, then all fields from
``encoding`` will be used.
* If ``tooltip`` is ``{"content": "data"}``, then all fields that appear in the
highlighted data point will be used.
* If set to ``null`` or ``false``, then no tooltip will be used.
See the `tooltip <https://vega.github.io/vega-lite/docs/tooltip.html>`__
documentation for a detailed discussion about tooltip in Vega-Lite.
**Default value:** ``null``
url : str, dict, :class:`URI`, :class:`ExprRef`
The URL of the image file for image marks.
width : dict, float, :class:`ExprRef`, :class:`RelativeBandSize`
Width of the marks. One of:
* A number representing a fixed pixel width.
* A relative band size definition. For example, ``{band: 0.5}`` represents half of
the band.
x : dict, float, :class:`ExprRef`, Literal['width']
X coordinates of the marks, or width of horizontal ``"bar"`` and ``"area"`` without
specified ``x2`` or ``width``.
The ``value`` of this channel can be a number or a string ``"width"`` for the width
of the plot.
x2 : dict, float, :class:`ExprRef`, Literal['width']
X2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and ``"rule"``.
The ``value`` of this channel can be a number or a string ``"width"`` for the width
of the plot.
x2Offset : dict, float, :class:`ExprRef`
Offset for x2-position.
xOffset : dict, float, :class:`ExprRef`
Offset for x-position.
y : dict, float, :class:`ExprRef`, Literal['height']
Y coordinates of the marks, or height of vertical ``"bar"`` and ``"area"`` without
specified ``y2`` or ``height``.
The ``value`` of this channel can be a number or a string ``"height"`` for the
height of the plot.
y2 : dict, float, :class:`ExprRef`, Literal['height']
Y2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and ``"rule"``.
The ``value`` of this channel can be a number or a string ``"height"`` for the
height of the plot.
y2Offset : dict, float, :class:`ExprRef`
Offset for y2-position.
yOffset : dict, float, :class:`ExprRef`
Offset for y-position.
"""
_schema = {"$ref": "#/definitions/MarkDef"}
def __init__(
self,
align: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
angle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
aria: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
ariaRole: Optional[str | Parameter | SchemaBase | Map] = Undefined,
ariaRoleDescription: Optional[str | Parameter | SchemaBase | Map] = Undefined,
aspect: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
bandSize: Optional[float] = Undefined,
baseline: Optional[Parameter | SchemaBase | Map | TextBaseline_T] = Undefined,
binSpacing: Optional[float] = Undefined,
blend: Optional[Parameter | SchemaBase | Map | Blend_T] = Undefined,
clip: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
color: Optional[str | Parameter | SchemaBase | Map | ColorName_T] = Undefined,
continuousBandSize: Optional[float] = Undefined,
cornerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
cornerRadiusBottomLeft: Optional[
float | Parameter | SchemaBase | Map
] = Undefined,
cornerRadiusBottomRight: Optional[
float | Parameter | SchemaBase | Map
] = Undefined,
cornerRadiusEnd: Optional[float | Parameter | SchemaBase | Map] = Undefined,
cornerRadiusTopLeft: Optional[float | Parameter | SchemaBase | Map] = Undefined,
cornerRadiusTopRight: Optional[
float | Parameter | SchemaBase | Map
] = Undefined,
cursor: Optional[Parameter | SchemaBase | Map | Cursor_T] = Undefined,
description: Optional[str | Parameter | SchemaBase | Map] = Undefined,
dir: Optional[Parameter | SchemaBase | Map | TextDirection_T] = Undefined,
discreteBandSize: Optional[float | SchemaBase | Map] = Undefined,
dx: Optional[float | Parameter | SchemaBase | Map] = Undefined,
dy: Optional[float | Parameter | SchemaBase | Map] = Undefined,
ellipsis: Optional[str | Parameter | SchemaBase | Map] = Undefined,
fill: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
fillOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
filled: Optional[bool] = Undefined,
font: Optional[str | Parameter | SchemaBase | Map] = Undefined,
fontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
fontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
fontWeight: Optional[Parameter | SchemaBase | Map | FontWeight_T] = Undefined,
height: Optional[float | Parameter | SchemaBase | Map] = Undefined,
href: Optional[str | Parameter | SchemaBase | Map] = Undefined,
innerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
interpolate: Optional[Parameter | SchemaBase | Map | Interpolate_T] = Undefined,
invalid: Optional[SchemaBase | MarkInvalidDataMode_T | None] = Undefined,
limit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
line: Optional[bool | SchemaBase | Map] = Undefined,
lineBreak: Optional[str | Parameter | SchemaBase | Map] = Undefined,
lineHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
minBandSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
opacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
order: Optional[bool | None] = Undefined,
orient: Optional[SchemaBase | Orientation_T] = Undefined,
outerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
padAngle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
point: Optional[bool | SchemaBase | Literal["transparent"] | Map] = Undefined,
radius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
radius2: Optional[float | Parameter | SchemaBase | Map] = Undefined,
radius2Offset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
radiusOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
shape: Optional[str | Parameter | SchemaBase | Map] = Undefined,
size: Optional[float | Parameter | SchemaBase | Map] = Undefined,
smooth: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
stroke: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
strokeCap: Optional[Parameter | SchemaBase | Map | StrokeCap_T] = Undefined,
strokeDash: Optional[
Parameter | SchemaBase | Sequence[float] | Map
] = Undefined,
strokeDashOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeJoin: Optional[Parameter | SchemaBase | Map | StrokeJoin_T] = Undefined,
strokeMiterLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined,
style: Optional[str | Sequence[str]] = Undefined,
tension: Optional[float | Parameter | SchemaBase | Map] = Undefined,
text: Optional[str | Parameter | SchemaBase | Sequence[str] | Map] = Undefined,
theta: Optional[float | Parameter | SchemaBase | Map] = Undefined,
theta2: Optional[float | Parameter | SchemaBase | Map] = Undefined,
theta2Offset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
thetaOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
thickness: Optional[float] = Undefined,
time: Optional[float | Parameter | SchemaBase | Map] = Undefined,
timeUnitBandPosition: Optional[float] = Undefined,
timeUnitBandSize: Optional[float] = Undefined,
tooltip: Optional[
str | bool | float | Parameter | SchemaBase | Map | None
] = Undefined,
url: Optional[str | Parameter | SchemaBase | Map] = Undefined,
width: Optional[float | Parameter | SchemaBase | Map] = Undefined,
x: Optional[
float | Parameter | SchemaBase | Literal["width"] | Map
] = Undefined,
x2: Optional[
float | Parameter | SchemaBase | Literal["width"] | Map
] = Undefined,
x2Offset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
xOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
y: Optional[
float | Parameter | SchemaBase | Literal["height"] | Map
] = Undefined,
y2: Optional[
float | Parameter | SchemaBase | Literal["height"] | Map
] = Undefined,
y2Offset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
yOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
**kwds,
):
super().__init__(
align=align,
angle=angle,
aria=aria,
ariaRole=ariaRole,
ariaRoleDescription=ariaRoleDescription,
aspect=aspect,
bandSize=bandSize,
baseline=baseline,
binSpacing=binSpacing,
blend=blend,
clip=clip,
color=color,
continuousBandSize=continuousBandSize,
cornerRadius=cornerRadius,
cornerRadiusBottomLeft=cornerRadiusBottomLeft,
cornerRadiusBottomRight=cornerRadiusBottomRight,
cornerRadiusEnd=cornerRadiusEnd,
cornerRadiusTopLeft=cornerRadiusTopLeft,
cornerRadiusTopRight=cornerRadiusTopRight,
cursor=cursor,
description=description,
dir=dir,
discreteBandSize=discreteBandSize,
dx=dx,
dy=dy,
ellipsis=ellipsis,
fill=fill,
fillOpacity=fillOpacity,
filled=filled,
font=font,
fontSize=fontSize,
fontStyle=fontStyle,
fontWeight=fontWeight,
height=height,
href=href,
innerRadius=innerRadius,
interpolate=interpolate,
invalid=invalid,
limit=limit,
line=line,
lineBreak=lineBreak,
lineHeight=lineHeight,
minBandSize=minBandSize,
opacity=opacity,
order=order,
orient=orient,
outerRadius=outerRadius,
padAngle=padAngle,
point=point,
radius=radius,
radius2=radius2,
radius2Offset=radius2Offset,
radiusOffset=radiusOffset,
shape=shape,
size=size,
smooth=smooth,
stroke=stroke,
strokeCap=strokeCap,
strokeDash=strokeDash,
strokeDashOffset=strokeDashOffset,
strokeJoin=strokeJoin,
strokeMiterLimit=strokeMiterLimit,
strokeOffset=strokeOffset,
strokeOpacity=strokeOpacity,
strokeWidth=strokeWidth,
style=style,
tension=tension,
text=text,
theta=theta,
theta2=theta2,
theta2Offset=theta2Offset,
thetaOffset=thetaOffset,
thickness=thickness,
time=time,
timeUnitBandPosition=timeUnitBandPosition,
timeUnitBandSize=timeUnitBandSize,
tooltip=tooltip,
url=url,
width=width,
x=x,
x2=x2,
x2Offset=x2Offset,
xOffset=xOffset,
y=y,
y2=y2,
y2Offset=y2Offset,
yOffset=yOffset,
**kwds,
)
| _MarkDef |
python | numba__numba | numba/tests/test_numpyadapt.py | {
"start": 448,
"end": 1307
} | class ____(unittest.TestCase):
def test_array_adaptor(self):
arystruct = ArrayStruct3D()
adaptorptr = _helperlib.c_helpers['adapt_ndarray']
adaptor = PYFUNCTYPE(c_int, py_object, c_void_p)(adaptorptr)
ary = np.arange(60).reshape(2, 3, 10)
status = adaptor(ary, byref(arystruct))
self.assertEqual(status, 0)
self.assertEqual(arystruct.data, ary.ctypes.data)
self.assertNotEqual(arystruct.meminfo, 0)
self.assertEqual(arystruct.parent, id(ary))
self.assertEqual(arystruct.nitems, 60)
self.assertEqual(arystruct.itemsize, ary.itemsize)
for i in range(3):
self.assertEqual(arystruct.shape[i], ary.ctypes.shape[i])
self.assertEqual(arystruct.strides[i], ary.ctypes.strides[i])
if __name__ == '__main__':
unittest.main()
| TestArrayAdaptor |
python | celery__celery | t/unit/utils/test_time.py | {
"start": 12364,
"end": 13615
} | class ____:
@patch('random.randrange', lambda n: n - 2)
def test_with_jitter(self):
assert get_exponential_backoff_interval(
factor=4,
retries=3,
maximum=100,
full_jitter=True
) == 4 * (2 ** 3) - 1
def test_without_jitter(self):
assert get_exponential_backoff_interval(
factor=4,
retries=3,
maximum=100,
full_jitter=False
) == 4 * (2 ** 3)
def test_bound_by_maximum(self):
maximum_boundary = 100
assert get_exponential_backoff_interval(
factor=40,
retries=3,
maximum=maximum_boundary
) == maximum_boundary
@patch('random.randrange', lambda n: n - 1)
def test_negative_values(self):
assert get_exponential_backoff_interval(
factor=-40,
retries=3,
maximum=100
) == 0
@patch('random.randrange')
def test_valid_random_range(self, rr):
rr.return_value = 0
maximum = 100
get_exponential_backoff_interval(
factor=40, retries=10, maximum=maximum, full_jitter=True)
rr.assert_called_once_with(maximum + 1)
| test_get_exponential_backoff_interval |
python | django__django | django/core/serializers/base.py | {
"start": 201,
"end": 303
} | class ____(KeyError):
"""The requested serializer was not found."""
pass
| SerializerDoesNotExist |
python | neetcode-gh__leetcode | python/0516-longest-palindromic-subsequence.py | {
"start": 1347,
"end": 1883
} | class ____:
def longestPalindromeSubseq(self, s: str) -> int:
return self.longestCommonSubsequence(s, s[::-1])
def longestCommonSubsequence(self, s1: str, s2: str) -> int:
N, M = len(s1), len(s2)
dp = [[0] * (M+1) for _ in range(N+1)]
for i in range(N):
for j in range(M):
if s1[i] == s2[j]:
dp[i+1][j+1] = 1 + dp[i][j]
else:
dp[i+1][j+1] = max(dp[i][j+1], dp[i+1][j])
return dp[N][M]
| Solution |
python | scipy__scipy | benchmarks/benchmarks/special.py | {
"start": 1098,
"end": 1346
} | class ____(Benchmark):
def setup(self):
x, y = np.logspace(3, 5, 10), np.logspace(3, 5, 10)
x, y = np.meshgrid(x, y)
self.large_z = x + 1j*y
def time_loggamma_asymptotic(self):
loggamma(self.large_z)
| Loggamma |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py | {
"start": 96032,
"end": 99314
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: Qwen3OmniMoeCode2WavConfig, layer_idx):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = Qwen3OmniMoeCode2WavAttention(config, layer_idx)
self.mlp = Qwen3OmniMoeCode2WavMlp(config)
self.input_layernorm = Qwen3OmniMoeCode2WavRMSNorm(config.hidden_size, config.rms_norm_eps)
self.post_attention_layernorm = Qwen3OmniMoeCode2WavRMSNorm(config.hidden_size, config.rms_norm_eps)
self.self_attn_layer_scale = Qwen3OmniMoeCode2WavLayerScale(config)
self.mlp_layer_scale = Qwen3OmniMoeCode2WavLayerScale(config)
self.attention_type = "sliding_attention"
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*):
attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
query_sequence_length, key_sequence_length)` if default attention is used.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence
kwargs (`dict`, *optional*):
Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
into the model
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = residual + self.self_attn_layer_scale(hidden_states)
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + self.mlp_layer_scale(hidden_states)
return hidden_states
| Qwen3OmniMoeCode2WavTransformerLayer |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/functions.py | {
"start": 13755,
"end": 16947
} | class ____(GoogleCloudBaseOperator):
"""
Deletes the specified function from Google Cloud Functions.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudFunctionDeleteFunctionOperator`
:param name: A fully-qualified function name, matching
the pattern: `^projects/[^/]+/locations/[^/]+/functions/[^/]+$`
:param gcp_conn_id: The connection ID to use to connect to Google Cloud.
:param api_version: API version used (for example v1 or v1beta1).
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START gcf_function_delete_template_fields]
template_fields: Sequence[str] = (
"name",
"gcp_conn_id",
"api_version",
"impersonation_chain",
)
# [END gcf_function_delete_template_fields]
operator_extra_links = (CloudFunctionsListLink(),)
def __init__(
self,
*,
name: str,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v1",
impersonation_chain: str | Sequence[str] | None = None,
project_id: str = PROVIDE_PROJECT_ID,
**kwargs,
) -> None:
self.name = name
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.impersonation_chain = impersonation_chain
self._validate_inputs()
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if not self.name:
raise AttributeError("Empty parameter: name")
pattern = FUNCTION_NAME_COMPILED_PATTERN
if not pattern.match(self.name):
raise AttributeError(f"Parameter name must match pattern: {FUNCTION_NAME_PATTERN}")
def execute(self, context: Context):
hook = CloudFunctionsHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
try:
project_id = self.project_id or hook.project_id
if project_id:
CloudFunctionsListLink.persist(
context=context,
project_id=project_id,
)
return hook.delete_function(self.name)
except HttpError as e:
status = e.resp.status
if status == 404:
self.log.info("The function does not exist in this project")
return None
self.log.error("An error occurred. Exiting.")
raise e
| CloudFunctionDeleteFunctionOperator |
python | openai__openai-python | src/openai/resources/evals/runs/runs.py | {
"start": 11320,
"end": 21377
} | class ____(AsyncAPIResource):
@cached_property
def output_items(self) -> AsyncOutputItems:
return AsyncOutputItems(self._client)
@cached_property
def with_raw_response(self) -> AsyncRunsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncRunsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncRunsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncRunsWithStreamingResponse(self)
async def create(
self,
eval_id: str,
*,
data_source: run_create_params.DataSource,
metadata: Optional[Metadata] | Omit = omit,
name: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RunCreateResponse:
"""
Kicks off a new run for a given evaluation, specifying the data source, and what
model configuration to use to test. The datasource will be validated against the
schema specified in the config of the evaluation.
Args:
data_source: Details about the run's data source.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
name: The name of the run.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return await self._post(
f"/evals/{eval_id}/runs",
body=await async_maybe_transform(
{
"data_source": data_source,
"metadata": metadata,
"name": name,
},
run_create_params.RunCreateParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=RunCreateResponse,
)
async def retrieve(
self,
run_id: str,
*,
eval_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RunRetrieveResponse:
"""
Get an evaluation run by ID.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return await self._get(
f"/evals/{eval_id}/runs/{run_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=RunRetrieveResponse,
)
def list(
self,
eval_id: str,
*,
after: str | Omit = omit,
limit: int | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
status: Literal["queued", "in_progress", "completed", "canceled", "failed"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[RunListResponse, AsyncCursorPage[RunListResponse]]:
"""
Get a list of runs for an evaluation.
Args:
after: Identifier for the last run from the previous pagination request.
limit: Number of runs to retrieve.
order: Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for
descending order. Defaults to `asc`.
status: Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed`
| `canceled`.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
return self._get_api_list(
f"/evals/{eval_id}/runs",
page=AsyncCursorPage[RunListResponse],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"order": order,
"status": status,
},
run_list_params.RunListParams,
),
),
model=RunListResponse,
)
async def delete(
self,
run_id: str,
*,
eval_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RunDeleteResponse:
"""
Delete an eval run.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return await self._delete(
f"/evals/{eval_id}/runs/{run_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=RunDeleteResponse,
)
async def cancel(
self,
run_id: str,
*,
eval_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RunCancelResponse:
"""
Cancel an ongoing evaluation run.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not eval_id:
raise ValueError(f"Expected a non-empty value for `eval_id` but received {eval_id!r}")
if not run_id:
raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
return await self._post(
f"/evals/{eval_id}/runs/{run_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=RunCancelResponse,
)
| AsyncRuns |
python | scipy__scipy | benchmarks/benchmarks/stats.py | {
"start": 25960,
"end": 26696
} | class ____(Benchmark):
param_names = ['n_size']
params = [
[10, 4000]
]
def setup(self, n_size):
rng = np.random.default_rng(12345678)
self.u_values = rng.random(n_size) * 10
self.u_weights = rng.random(n_size) * 10
self.v_values = rng.random(n_size // 2) * 10
self.v_weights = rng.random(n_size // 2) * 10
def time_energy_distance(self, n_size):
stats.energy_distance(self.u_values, self.v_values,
self.u_weights, self.v_weights)
def time_wasserstein_distance(self, n_size):
stats.wasserstein_distance(self.u_values, self.v_values,
self.u_weights, self.v_weights)
| DistanceFunctions |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/bedrock.py | {
"start": 28148,
"end": 34105
} | class ____(AwsBaseOperator[BedrockAgentHook]):
"""
Begin an ingestion job, in which an Amazon Bedrock data source is added to an Amazon Bedrock knowledge base.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BedrockIngestDataOperator`
:param knowledge_base_id: The unique identifier of the knowledge base to which to add the data source. (templated)
:param data_source_id: The unique identifier of the data source to ingest. (templated)
:param ingest_data_kwargs: Any additional optional parameters to pass to the API call. (templated)
:param wait_for_completion: Whether to wait for cluster to stop. (default: True)
:param waiter_delay: Time in seconds to wait between status checks. (default: 60)
:param waiter_max_attempts: Maximum number of attempts to check for job completion. (default: 10)
:param deferrable: If True, the operator will wait asynchronously for the cluster to stop.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
aws_hook_class = BedrockAgentHook
template_fields: Sequence[str] = aws_template_fields(
"knowledge_base_id",
"data_source_id",
"ingest_data_kwargs",
)
def __init__(
self,
knowledge_base_id: str,
data_source_id: str,
ingest_data_kwargs: dict[str, Any] | None = None,
wait_for_completion: bool = True,
waiter_delay: int = 60,
waiter_max_attempts: int = 10,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.knowledge_base_id = knowledge_base_id
self.data_source_id = data_source_id
self.ingest_data_kwargs = ingest_data_kwargs or {}
self.wait_for_completion = wait_for_completion
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
self.deferrable = deferrable
self.indexing_error_max_attempts = 5
self.indexing_error_retry_delay = 5
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> str:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] != "success":
raise AirflowException(f"Error while running ingestion job: {validated_event}")
self.log.info("Bedrock ingestion job `%s` complete.", validated_event["ingestion_job_id"])
return validated_event["ingestion_job_id"]
def execute(self, context: Context) -> str:
def start_ingestion_job():
try:
ingestion_job_id = self.hook.conn.start_ingestion_job(
knowledgeBaseId=self.knowledge_base_id, dataSourceId=self.data_source_id
)["ingestionJob"]["ingestionJobId"]
return ingestion_job_id
except ClientError as error:
error_message = error.response["Error"]["Message"].lower()
is_known_retryable_message = (
"dependency error document status code: 404" in error_message
or "request failed: [http_exception] server returned 401" in error_message
)
if all(
[
error.response["Error"]["Code"] == "ValidationException",
is_known_retryable_message,
self.indexing_error_max_attempts > 0,
]
):
self.indexing_error_max_attempts -= 1
self.log.warning(
"Index is not ready for ingestion, retrying in %s seconds.",
self.indexing_error_retry_delay,
)
self.log.info("%s retries remaining.", self.indexing_error_max_attempts)
sleep(self.indexing_error_retry_delay)
return start_ingestion_job()
raise
ingestion_job_id = start_ingestion_job()
if self.deferrable:
self.log.info("Deferring for ingestion job.")
self.defer(
trigger=BedrockIngestionJobTrigger(
knowledge_base_id=self.knowledge_base_id,
data_source_id=self.data_source_id,
ingestion_job_id=ingestion_job_id,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
)
if self.wait_for_completion:
self.log.info("Waiting for ingestion job %s", ingestion_job_id)
self.hook.get_waiter(waiter_name="ingestion_job_complete").wait(
knowledgeBaseId=self.knowledge_base_id,
dataSourceId=self.data_source_id,
ingestionJobId=ingestion_job_id,
)
return ingestion_job_id
| BedrockIngestDataOperator |
python | huggingface__transformers | tests/models/fastspeech2_conformer/test_modeling_fastspeech2_conformer.py | {
"start": 34184,
"end": 36421
} | class ____(unittest.TestCase):
def test_inference_integration(self):
model = FastSpeech2ConformerWithHifiGan.from_pretrained("espnet/fastspeech2_conformer_with_hifigan")
model.to(torch_device)
model.eval()
tokenizer = FastSpeech2ConformerTokenizer.from_pretrained("espnet/fastspeech2_conformer")
text = "Test that this generates speech"
input_ids = tokenizer(text, return_tensors="pt").to(torch_device)["input_ids"]
output = model(input_ids)
waveform = output.waveform
# waveform is too large (1, 52480), so only check first 100 elements
# fmt: off
expected_waveform = torch.tensor(
[-9.6345e-04, 1.3557e-03, 5.7559e-04, 2.4706e-04, 2.2675e-04, 1.2258e-04, 4.7784e-04, 1.0109e-03, -1.9718e-04, 6.3495e-04, 3.2106e-04, 6.3620e-05, 9.1713e-04, -2.5664e-05, 1.9596e-04, 6.0418e-04, 8.1112e-04, 3.6342e-04, -6.3396e-04, -2.0146e-04, -1.1768e-04, 4.3155e-04, 7.5599e-04, -2.2972e-04, -9.5665e-05, 3.3078e-04, 1.3793e-04, -1.4932e-04, -3.9645e-04, 3.6473e-05, -1.7224e-04, -4.5370e-05, -4.8950e-04, -4.3059e-04, 1.0451e-04, -1.0485e-03, -6.0410e-04, 1.6990e-04, -2.1997e-04, -3.8769e-04, -7.6898e-04, -3.2372e-04, -1.9783e-04, 5.2896e-05, -1.0586e-03, -7.8516e-04, 7.6867e-04, -8.5331e-05, -4.8158e-04, -4.5362e-05, -1.0770e-04, 6.6823e-04, 3.0765e-04, 3.3669e-04, 9.5677e-04, 1.0458e-03, 5.8129e-04, 3.3737e-04, 1.0816e-03, 7.0346e-04, 4.2378e-04, 4.3131e-04, 2.8095e-04, 1.2201e-03, 5.6121e-04, -1.1086e-04, 4.9908e-04, 1.5586e-04, 4.2046e-04, -2.8088e-04, -2.2462e-04, -1.5539e-04, -7.0126e-04, -2.8577e-04, -3.3693e-04, -1.2471e-04, -6.9104e-04, -1.2867e-03, -6.2651e-04, -2.5586e-04, -1.3201e-04, -9.4537e-04, -4.8438e-04, 4.1458e-04, 6.4109e-04, 1.0891e-04, -6.3764e-04, 4.5573e-04, 8.2974e-04, 3.2973e-06, -3.8274e-04, -2.0400e-04, 4.9922e-04, 2.1508e-04, -1.1009e-04, -3.9763e-05, 3.0576e-04, 3.1485e-05, -2.7574e-05, 3.3856e-04],
device=torch_device,
)
# fmt: on
torch.testing.assert_close(waveform[0, :100], expected_waveform, rtol=1e-4, atol=1e-4)
self.assertEqual(waveform.shape, (1, 52480))
| FastSpeech2ConformerWithHifiGanIntegrationTest |
python | django__django | django/contrib/gis/db/models/fields.py | {
"start": 11333,
"end": 11488
} | class ____(GeometryField):
geom_type = "POLYGON"
geom_class = Polygon
form_class = forms.PolygonField
description = _("Polygon")
| PolygonField |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-dashscope/llama_index/embeddings/dashscope/base.py | {
"start": 531,
"end": 755
} | class ____(str, Enum):
"""DashScope TextEmbedding models."""
TEXT_EMBEDDING_V1 = "text-embedding-v1"
TEXT_EMBEDDING_V2 = "text-embedding-v2"
TEXT_EMBEDDING_V3 = "text-embedding-v3"
| DashScopeTextEmbeddingModels |
python | sqlalchemy__sqlalchemy | test/dialect/oracle/test_reflection.py | {
"start": 40821,
"end": 42215
} | class ____(fixtures.TestBase):
__requires__ = ("oracle_test_dblink",)
__only_on__ = "oracle"
__sparse_driver_backend__ = True
@classmethod
def setup_test_class(cls):
cls.dblink = config.file_config.get("sqla_testing", "oracle_db_link")
# note that the synonym here is still not totally functional
# when accessing via a different username as we do with the
# multiprocess test suite, so testing here is minimal
with testing.db.begin() as conn:
conn.exec_driver_sql(
"create table test_table "
"(id integer primary key, data varchar2(50))"
)
conn.exec_driver_sql(
"create synonym test_table_syn "
"for test_table@%s" % cls.dblink
)
@classmethod
def teardown_test_class(cls):
with testing.db.begin() as conn:
conn.exec_driver_sql("drop synonym test_table_syn")
conn.exec_driver_sql("drop table test_table")
def test_reflection(self):
"""test the resolution of the synonym/dblink."""
m = MetaData()
t = Table(
"test_table_syn",
m,
autoload_with=testing.db,
oracle_resolve_synonyms=True,
)
eq_(list(t.c.keys()), ["id", "data"])
eq_(list(t.primary_key), [t.c.id])
| DBLinkReflectionTest |
python | pallets__werkzeug | src/werkzeug/middleware/lint.py | {
"start": 3876,
"end": 6952
} | class ____:
def __init__(
self,
iterator: t.Iterable[bytes],
headers_set: tuple[int, Headers],
chunks: list[int],
) -> None:
self._iterator = iterator
self._next = iter(iterator).__next__
self.closed = False
self.headers_set = headers_set
self.chunks = chunks
def __iter__(self) -> GuardedIterator:
return self
def __next__(self) -> bytes:
if self.closed:
warn("Iterated over closed 'app_iter'.", WSGIWarning, stacklevel=2)
rv = self._next()
if not self.headers_set:
warn(
"The application returned before it started the response.",
WSGIWarning,
stacklevel=2,
)
check_type("application iterator items", rv, bytes)
self.chunks.append(len(rv))
return rv
def close(self) -> None:
self.closed = True
if hasattr(self._iterator, "close"):
self._iterator.close()
if self.headers_set:
status_code, headers = self.headers_set
bytes_sent = sum(self.chunks)
content_length = headers.get("content-length", type=int)
if status_code == 304:
for key, _value in headers:
key = key.lower()
if key not in ("expires", "content-location") and is_entity_header(
key
):
warn(
f"Entity header {key!r} found in 304 response.",
HTTPWarning,
stacklevel=2,
)
if bytes_sent:
warn(
"304 responses must not have a body.",
HTTPWarning,
stacklevel=2,
)
elif 100 <= status_code < 200 or status_code == 204:
if content_length != 0:
warn(
f"{status_code} responses must have an empty content length.",
HTTPWarning,
stacklevel=2,
)
if bytes_sent:
warn(
f"{status_code} responses must not have a body.",
HTTPWarning,
stacklevel=2,
)
elif content_length is not None and content_length != bytes_sent:
warn(
"Content-Length and the number of bytes sent to the"
" client do not match.",
WSGIWarning,
stacklevel=2,
)
def __del__(self) -> None:
if not self.closed:
try:
warn(
"Iterator was garbage collected before it was closed.",
WSGIWarning,
stacklevel=2,
)
except Exception:
pass
| GuardedIterator |
python | scikit-learn__scikit-learn | sklearn/utils/_metadata_requests.py | {
"start": 49167,
"end": 59941
} | class ____:
"""Mixin class for adding metadata request functionality.
``BaseEstimator`` inherits from this Mixin.
.. versionadded:: 1.3
"""
if TYPE_CHECKING: # pragma: no cover
# This code is never run in runtime, but it's here for type checking.
# Type checkers fail to understand that the `set_{method}_request`
# methods are dynamically generated, and they complain that they are
# not defined. We define them here to make type checkers happy.
# During type checking analyzers assume this to be True.
# The following list of defined methods mirrors the list of methods
# in SIMPLE_METHODS.
# fmt: off
def set_fit_request(self, **kwargs): pass
def set_partial_fit_request(self, **kwargs): pass
def set_predict_request(self, **kwargs): pass
def set_predict_proba_request(self, **kwargs): pass
def set_predict_log_proba_request(self, **kwargs): pass
def set_decision_function_request(self, **kwargs): pass
def set_score_request(self, **kwargs): pass
def set_split_request(self, **kwargs): pass
def set_transform_request(self, **kwargs): pass
def set_inverse_transform_request(self, **kwargs): pass
# fmt: on
def __init_subclass__(cls, **kwargs):
"""Set the ``set_{method}_request`` methods.
This uses PEP-487 [1]_ to set the ``set_{method}_request`` methods. It
looks for the information available in the set default values which are
set using ``__metadata_request__*`` class attributes, or inferred
from method signatures.
The ``__metadata_request__*`` class attributes are used when a method
does not explicitly accept a metadata through its arguments or if the
developer would like to specify a request value for those metadata
which are different from the default ``None``.
References
----------
.. [1] https://www.python.org/dev/peps/pep-0487
"""
try:
for method in SIMPLE_METHODS:
requests = cls._get_class_level_metadata_request_values(method)
if not requests:
continue
setattr(
cls,
f"set_{method}_request",
RequestMethod(method, sorted(requests)),
)
except Exception:
# if there are any issues here, it will be raised when
# ``get_metadata_routing`` is called. Here we are going to ignore
# all the issues and make sure class definition does not fail.
pass
super().__init_subclass__(**kwargs)
@classmethod
def _get_class_level_metadata_request_values(cls, method: str):
"""Get class level metadata request values.
This method first checks the `method`'s signature for passable metadata and then
updates these with the metadata request values set at class level via the
``__metadata_request__{method}`` class attributes.
This method (being a class-method), does not take request values set at
instance level into account.
"""
# Here we use `isfunction` instead of `ismethod` because calling `getattr`
# on a class instead of an instance returns an unbound function.
if not hasattr(cls, method) or not inspect.isfunction(getattr(cls, method)):
return dict()
# ignore the first parameter of the method, which is usually "self"
signature_items = list(
inspect.signature(getattr(cls, method)).parameters.items()
)[1:]
params = defaultdict(
str,
{
param_name: None
for param_name, param_info in signature_items
if param_name not in {"X", "y", "Y", "Xt", "yt"}
and param_info.kind
not in {param_info.VAR_POSITIONAL, param_info.VAR_KEYWORD}
},
)
# Then overwrite those defaults with the ones provided in
# `__metadata_request__{method}` class attributes, which take precedence over
# signature sniffing.
# need to go through the MRO since this is a classmethod and
# ``vars`` doesn't report the parent class attributes. We go through
# the reverse of the MRO so that child classes have precedence over
# their parents.
substr = f"__metadata_request__{method}"
for base_class in reversed(inspect.getmro(cls)):
# Copy is needed with free-threaded context to avoid
# RuntimeError: dictionary changed size during iteration.
# copy.deepcopy applied on an instance of base_class adds
# __slotnames__ attribute to base_class.
base_class_items = vars(base_class).copy().items()
for attr, value in base_class_items:
# we don't check for equivalence since python prefixes attrs
# starting with __ with the `_ClassName`.
if substr not in attr:
continue
for prop, alias in value.items():
# Here we add request values specified via those class attributes
# to the result dictionary (params). Adding a request which already
# exists will override the previous one. Since we go through the
# MRO in reverse order, the one specified by the lowest most classes
# in the inheritance tree are the ones which take effect.
if prop not in params and alias == UNUSED:
raise ValueError(
f"Trying to remove parameter {prop} with UNUSED which"
" doesn't exist."
)
params[prop] = alias
return {param: alias for param, alias in params.items() if alias is not UNUSED}
def _get_metadata_request(self):
"""Get requested metadata for the instance.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
Returns
-------
request : MetadataRequest
A :class:`~sklearn.utils.metadata_routing.MetadataRequest` instance.
"""
if hasattr(self, "_metadata_request"):
requests = get_routing_for_object(self._metadata_request)
else:
requests = MetadataRequest(owner=self)
for method in SIMPLE_METHODS:
setattr(
requests,
method,
MethodMetadataRequest(
owner=self,
method=method,
requests=self._get_class_level_metadata_request_values(method),
),
)
return requests
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
Returns
-------
routing : MetadataRequest
A :class:`~sklearn.utils.metadata_routing.MetadataRequest` encapsulating
routing information.
"""
return self._get_metadata_request()
# Process Routing in Routers
# ==========================
# This is almost always the only method used in routers to process and route
# given metadata. This is to minimize the boilerplate required in routers.
# Here the first two arguments are positional only which makes everything
# passed as keyword argument a metadata. The first two args also have an `_`
# prefix to reduce the chances of name collisions with the passed metadata, and
# since they're positional only, users will never type those underscores.
def process_routing(_obj, _method, /, **kwargs):
"""Validate and route metadata.
This function is used inside a :term:`router`'s method, e.g. :term:`fit`,
to validate the metadata and handle the routing.
Assuming this signature of a router's fit method:
``fit(self, X, y, sample_weight=None, **fit_params)``,
a call to this function would be:
``process_routing(self, "fit", sample_weight=sample_weight, **fit_params)``.
Note that if routing is not enabled and ``kwargs`` is empty, then it
returns an empty routing where ``process_routing(...).ANYTHING.ANY_METHOD``
is always an empty dictionary.
.. versionadded:: 1.3
Parameters
----------
_obj : object
An object implementing ``get_metadata_routing``. Typically a
:term:`meta-estimator`.
_method : str
The name of the router's method in which this function is called.
**kwargs : dict
Metadata to be routed.
Returns
-------
routed_params : Bunch
A :class:`~utils.Bunch` of the form ``{"object_name": {"method_name":
{metadata: value}}}`` which can be used to pass the required metadata to
A :class:`~sklearn.utils.Bunch` of the form ``{"object_name": {"method_name":
{metadata: value}}}`` which can be used to pass the required metadata to
corresponding methods or corresponding child objects. The object names
are those defined in `obj.get_metadata_routing()`.
"""
if not kwargs:
# If routing is not enabled and kwargs are empty, then we don't have to
# try doing any routing, we can simply return a structure which returns
# an empty dict on routed_params.ANYTHING.ANY_METHOD.
class EmptyRequest:
def get(self, name, default=None):
return Bunch(**{method: dict() for method in METHODS})
def __getitem__(self, name):
return Bunch(**{method: dict() for method in METHODS})
def __getattr__(self, name):
return Bunch(**{method: dict() for method in METHODS})
return EmptyRequest()
if not (hasattr(_obj, "get_metadata_routing") or isinstance(_obj, MetadataRouter)):
raise AttributeError(
f"The given object ({_routing_repr(_obj)}) needs to either"
" implement the routing method `get_metadata_routing` or be a"
" `MetadataRouter` instance."
)
if _method not in METHODS:
raise TypeError(
f"Can only route and process input on these methods: {METHODS}, "
f"while the passed method is: {_method}."
)
request_routing = get_routing_for_object(_obj)
request_routing.validate_metadata(params=kwargs, method=_method)
routed_params = request_routing.route_params(params=kwargs, caller=_method)
return routed_params
| _MetadataRequester |
python | getsentry__sentry | src/sentry/shared_integrations/client/proxy.py | {
"start": 3863,
"end": 9995
} | class ____(ApiClient):
"""
Universal Client to access third-party resources safely in Hybrid Cloud.
Requests to third parties must always exit the Sentry subnet via the Control Silo, and only
add sensitive credentials at that stage.
When testing, client requests will always go to the base_url unless `self._use_proxy_url_for_tests`
is set to True. Enable to test proxying locally.
"""
_should_proxy_to_control = False
_use_proxy_url_for_tests = False
def __init__(
self,
integration_id: int | None = None,
org_integration_id: int | None = None,
keyid: str | None = None,
verify_ssl: bool = True,
logging_context: Mapping[str, Any] | None = None,
) -> None:
super().__init__(
verify_ssl=verify_ssl, logging_context=logging_context, integration_id=integration_id
)
self.org_integration_id = org_integration_id
self.keyid = keyid
# The default timeout value for the APIClient and the RegionSiloClient is 30 seconds.
# If the request flow for processing a Webhook outbox message is between the RegionSiloClient and the
# IntegrationProxyClient, then the IntegrationProxyClient will need to have a smaller timeout value.
# Otherwise, the RegionSiloClient will timeout before it can receive a response from the IntegrationProxyClient.
self.timeout = 10
if self.determine_whether_should_proxy_to_control():
self._should_proxy_to_control = True
self.proxy_url = get_proxy_url()
if in_test_environment() and not self._use_proxy_url_for_tests:
logger.info("proxy_disabled_in_test_env")
self.proxy_url = self.base_url
def build_session(self) -> SafeSession:
"""
Generates a safe Requests session for the API client to use.
This injects a custom is_ipaddress_permitted function to allow only connections to the IP address of the Control Silo.
We only validate the IP address from within the Region Silo.
For all other silo modes, we use the default is_ipaddress_permitted function, which tests against SENTRY_DISALLOWED_IPS.
"""
if SiloMode.get_current_mode() == SiloMode.REGION:
return build_session(
is_ipaddress_permitted=is_control_silo_ip_address,
max_retries=Retry(
total=options.get("hybridcloud.integrationproxy.retries"),
backoff_factor=0.1,
status_forcelist=[503],
allowed_methods=["PATCH", "HEAD", "PUT", "GET", "DELETE", "POST"],
),
)
return build_session()
@staticmethod
def determine_whether_should_proxy_to_control() -> bool:
return (
SiloMode.get_current_mode() == SiloMode.REGION
and getattr(settings, "SENTRY_SUBNET_SECRET", None) is not None
and getattr(settings, "SENTRY_CONTROL_ADDRESS", None) is not None
)
@control_silo_function
def authorize_request(self, prepared_request: PreparedRequest) -> PreparedRequest:
"""
Used in the Control Silo to authorize all outgoing requests to the service provider.
"""
return prepared_request
def finalize_request(self, prepared_request: PreparedRequest) -> PreparedRequest:
"""
Every request through these subclassed clients run this method.
If running as a monolith/control, we must authorize each request before sending.
If running as a region, we don't authorize and instead, send it to our proxy endpoint,
where tokens are added in by Control Silo. We do this to avoid race conditions around
stale tokens and centralize token refresh flows.
"""
if not self._should_proxy_to_control or not prepared_request.url:
prepared_request = self.authorize_request(prepared_request=prepared_request)
return prepared_request
assert self.base_url and self.proxy_url
base_url = self.base_url.rstrip("/")
if not prepared_request.url.startswith(base_url):
parsed = urlparse(prepared_request.url)
proxy_path = parsed.path
base_url = ParseResult(
scheme=parsed.scheme,
netloc=parsed.netloc,
path="",
params="",
query="",
fragment="",
).geturl()
base_url = base_url.rstrip("/")
# E.g. client.get("/chat.postMessage") -> proxy_path = 'chat.postMessage'
proxy_path = trim_leading_slashes(prepared_request.url[len(base_url) :])
proxy_url = self.proxy_url.rstrip("/")
url = f"{proxy_url}/"
if (
not self._should_proxy_to_control
or (in_test_environment() and not self._use_proxy_url_for_tests)
and proxy_path
):
# When proxying to control is disabled, or in the default test environment
# This proxy acts as a passthrough, so we need to append the path directly
url = f"{url}{proxy_path}".rstrip("/")
request_body = prepared_request.body
if not isinstance(request_body, bytes):
request_body = request_body.encode("utf-8") if request_body else DEFAULT_REQUEST_BODY
prepared_request.headers[PROXY_OI_HEADER] = str(self.org_integration_id)
prepared_request.headers[PROXY_PATH] = proxy_path
if self.keyid:
prepared_request.headers[PROXY_KEYID_HEADER] = str(self.keyid)
prepared_request.headers[PROXY_BASE_URL_HEADER] = base_url
assert settings.SENTRY_SUBNET_SECRET is not None
prepared_request.headers[PROXY_SIGNATURE_HEADER] = encode_subnet_signature(
secret=settings.SENTRY_SUBNET_SECRET,
base_url=base_url,
path=proxy_path,
identifier=str(self.org_integration_id),
request_body=request_body,
)
prepared_request.url = url
return prepared_request
| IntegrationProxyClient |
python | python-openxml__python-docx | tests/image/test_png.py | {
"start": 7530,
"end": 10602
} | class ____:
def it_can_construct_from_a_stream(
self, stream_, StreamReader_, stream_rdr_, _ChunkParser__init_
):
chunk_parser = _ChunkParser.from_stream(stream_)
StreamReader_.assert_called_once_with(stream_, BIG_ENDIAN)
_ChunkParser__init_.assert_called_once_with(ANY, stream_rdr_)
assert isinstance(chunk_parser, _ChunkParser)
def it_can_iterate_over_the_chunks_in_its_png_stream(
self, _iter_chunk_offsets_, _ChunkFactory_, stream_rdr_, chunk_, chunk_2_
):
offsets = [2, 4, 6]
chunk_lst = [chunk_, chunk_2_]
chunk_parser = _ChunkParser(stream_rdr_)
chunks = list(chunk_parser.iter_chunks())
_iter_chunk_offsets_.assert_called_once_with(chunk_parser)
assert _ChunkFactory_.call_args_list == [
call(PNG_CHUNK_TYPE.IHDR, stream_rdr_, offsets[0]),
call(PNG_CHUNK_TYPE.pHYs, stream_rdr_, offsets[1]),
]
assert chunks == chunk_lst
def it_iterates_over_the_chunk_offsets_to_help_parse(self, iter_offsets_fixture):
chunk_parser, expected_chunk_offsets = iter_offsets_fixture
chunk_offsets = list(chunk_parser._iter_chunk_offsets())
assert chunk_offsets == expected_chunk_offsets
# fixtures -------------------------------------------------------
@pytest.fixture
def chunk_(self, request):
return instance_mock(request, _Chunk)
@pytest.fixture
def chunk_2_(self, request):
return instance_mock(request, _Chunk)
@pytest.fixture
def _ChunkFactory_(self, request, chunk_lst_):
return function_mock(request, "docx.image.png._ChunkFactory", side_effect=chunk_lst_)
@pytest.fixture
def chunk_lst_(self, chunk_, chunk_2_):
return [chunk_, chunk_2_]
@pytest.fixture
def _ChunkParser__init_(self, request):
return initializer_mock(request, _ChunkParser)
@pytest.fixture
def _iter_chunk_offsets_(self, request):
chunk_offsets = (
(PNG_CHUNK_TYPE.IHDR, 2),
(PNG_CHUNK_TYPE.pHYs, 4),
)
return method_mock(
request,
_ChunkParser,
"_iter_chunk_offsets",
return_value=iter(chunk_offsets),
)
@pytest.fixture
def iter_offsets_fixture(self):
bytes_ = b"-filler-\x00\x00\x00\x00IHDRxxxx\x00\x00\x00\x00IEND"
stream_rdr = StreamReader(io.BytesIO(bytes_), BIG_ENDIAN)
chunk_parser = _ChunkParser(stream_rdr)
expected_chunk_offsets = [
(PNG_CHUNK_TYPE.IHDR, 16),
(PNG_CHUNK_TYPE.IEND, 28),
]
return chunk_parser, expected_chunk_offsets
@pytest.fixture
def StreamReader_(self, request, stream_rdr_):
return class_mock(request, "docx.image.png.StreamReader", return_value=stream_rdr_)
@pytest.fixture
def stream_(self, request):
return instance_mock(request, io.BytesIO)
@pytest.fixture
def stream_rdr_(self, request):
return instance_mock(request, StreamReader)
| Describe_ChunkParser |
python | django__django | tests/utils_tests/test_module_loading.py | {
"start": 5391,
"end": 5968
} | class ____(SimpleTestCase):
def test_import_string(self):
cls = import_string("django.utils.module_loading.import_string")
self.assertEqual(cls, import_string)
# Test exceptions raised
with self.assertRaises(ImportError):
import_string("no_dots_in_path")
msg = 'Module "utils_tests" does not define a "unexistent" attribute'
with self.assertRaisesMessage(ImportError, msg):
import_string("utils_tests.unexistent")
@modify_settings(INSTALLED_APPS={"append": "utils_tests.test_module"})
| ModuleImportTests |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/processors.py | {
"start": 7701,
"end": 8583
} | class ____(HighlightSearchProcessor):
"""
Highlight the search terms that are used for highlighting the incremental
search. The style class 'incsearch' will be applied to the content.
Important: this requires the `preview_search=True` flag to be set for the
`BufferControl`. Otherwise, the cursor position won't be set to the search
match while searching, and nothing happens.
"""
_classname = "incsearch"
_classname_current = "incsearch.current"
def _get_search_text(self, buffer_control: BufferControl) -> str:
"""
The text we are searching for.
"""
# When the search buffer has focus, take that text.
search_buffer = buffer_control.search_buffer
if search_buffer is not None and search_buffer.text:
return search_buffer.text
return ""
| HighlightIncrementalSearchProcessor |
python | bokeh__bokeh | src/bokeh/models/widgets/tables.py | {
"start": 23054,
"end": 29667
} | class ____(TableWidget):
''' Two-dimensional grid for visualization and editing large amounts
of data.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
autosize_mode = Enum(AutosizeMode, default="force_fit", help="""
Describes the column autosizing mode with one of the following options:
``"fit_columns"``
Compute column widths based on cell contents but ensure the
table fits into the available viewport. This results in no
horizontal scrollbar showing up, but data can get unreadable
if there is not enough space available.
``"fit_viewport"``
Adjust the viewport size after computing columns widths based
on cell contents.
``"force_fit"``
Fit columns into available space dividing the table width across
the columns equally (equivalent to `fit_columns=True`).
This results in no horizontal scrollbar showing up, but data
can get unreadable if there is not enough space available.
``"none"``
Do not automatically compute column widths.
""")
auto_edit = Bool(False, help="""
When enabled editing mode is enabled after a single click on a
table cell.
""")
columns = List(Instance(TableColumn), help="""
The list of child column widgets.
""")
fit_columns = Nullable(Bool, help="""
**This is a legacy parameter.** For new development, use the
``autosize_mode`` parameter.
Whether columns should be fit to the available width. This results in
no horizontal scrollbar showing up, but data can get unreadable if there
is not enough space available. If set to True, each column's width is
understood as maximum width.
""")
frozen_columns = Nullable(Int, help="""
Integer indicating the number of columns to freeze. If set the first N
columns will be frozen which prevents them from scrolling out of frame.
""")
frozen_rows = Nullable(Int, help="""
Integer indicating the number of rows to freeze. If set the first N
rows will be frozen which prevents them from scrolling out of frame,
if set to a negative value last N rows will be frozen.
""")
sortable = Bool(True, help="""
Allows to sort table's contents. By default natural order is preserved.
To sort a column, click on it's header. Clicking one more time changes
sort direction. Use Ctrl + click to return to natural order. Use
Shift + click to sort multiple columns simultaneously.
""")
reorderable = Bool(True, help="""
Allows the reordering of a table's columns. To reorder a column,
click and drag a table's header to the desired location in the table.
The columns on either side will remain in their previous order.
""")
editable = Bool(False, help="""
Allows to edit table's contents. Needs cell editors to be configured on
columns that are required to be editable.
""")
selectable = Either(Bool(True), Enum("checkbox"), help="""
Whether a table's rows can be selected or not. Using ``checkbox`` is
equivalent to True, but makes selection visible through a checkbox
for each row, instead of highlighting rows. Multiple selection is
allowed and can be achieved by either clicking multiple checkboxes (if
enabled) or using Shift + click on rows.
""")
index_position = Nullable(Int, default=0, help="""
Where among the list of columns to insert a column displaying the row
index. Negative indices are supported, and specify an index position
from the end of the list of columns (i.e. standard Python behaviour).
To prevent the index column from being added, set to None.
If the absolute value of index_position is larger than the length of
the columns, then the index will appear at the beginning or end, depending
on the sign.
""")
index_header = String("#", help="""
The column header to display for the index column, if it is present.
""")
index_width = Int(40, help="""
The width of the index column, if present.
""")
scroll_to_selection = Bool(True, help="""
Whenever a selection is made on the data source, scroll the selected
rows into the table's viewport if none of the selected rows are already
in the viewport.
""")
header_row = Bool(True, help="""
Whether to show a header row with column names at the top of the table.
""")
width = Override(default=600)
height = Override(default=400)
row_height = Int(25, help="""
The height of each row in pixels.
""")
multi_selectable = Bool(True, help="""
Whether multiple rows can be selected.
""")
@staticmethod
def from_data(data, columns=None, formatters={}, **kwargs) -> DataTable:
""" Create a simple table from a pandas dataframe, dictionary or ColumnDataSource.
Args:
data (DataFrame or dict or ColumnDataSource) :
The data to create the table from. If the data is a dataframe
or dictionary, a ColumnDataSource will be created from it.
columns (list, optional) :
A list of column names to use from the input data.
If None, use all columns. (default: None)
formatters (dict, optional) :
A mapping of column names and corresponding Formatters to
apply to each column. (default: None)
Keyword arguments:
Any additional keyword arguments will be passed to DataTable.
Returns:
DataTable
Raises:
ValueError
If the provided data is not a ColumnDataSource
or a data source that a ColumnDataSource can be created from.
"""
if isinstance(data, ColumnDataSource):
source = data.clone()
else:
try:
source = ColumnDataSource(data)
except ValueError as e:
raise ValueError("Expected a ColumnDataSource or something a ColumnDataSource can be created from like a dict or a DataFrame") from e
if columns is not None:
source.data = {col: source.data[col] for col in columns}
table_columns = []
for c in source.data.keys():
formatter = formatters.get(c, Intrinsic)
table_columns.append(TableColumn(field=c, title=c, formatter=formatter))
return DataTable(source=source, columns=table_columns, index_position=None, **kwargs)
| DataTable |
python | google__jax | jax/_src/pallas/fuser/block_spec.py | {
"start": 20139,
"end": 20633
} | class ____(Protocol):
def __call__(
self,
ctx: UsageRuleContext,
used_outs: Sequence[set[Usage]] | set[Usage],
**params: Any,
) -> Sequence[set[Usage]]:
...
usage_rules: dict[core.Primitive, UsageRuleFn] = {}
def register_usage_rule(
prim: core.Primitive,
) -> Callable[[UsageRuleFn], UsageRuleFn]:
def wrapper(
f: UsageRuleFn,
) -> UsageRuleFn:
usage_rules[prim] = f
return f
return wrapper
# ## Eval interpreter rules
| UsageRuleFn |
python | pypa__warehouse | warehouse/oidc/models/gitlab.py | {
"start": 15525,
"end": 17132
} | class ____(GitLabPublisherMixin, PendingOIDCPublisher):
__tablename__ = "pending_gitlab_oidc_publishers"
__mapper_args__ = {"polymorphic_identity": "pending_gitlab_oidc_publishers"}
__table_args__ = ( # type: ignore[assignment]
UniqueConstraint(
"namespace",
"project",
"workflow_filepath",
"environment",
name="_pending_gitlab_oidc_publisher_uc",
),
)
id: Mapped[UUID] = mapped_column(
PG_UUID(as_uuid=True), ForeignKey(PendingOIDCPublisher.id), primary_key=True
)
def reify(self, session: Session) -> GitLabPublisher:
"""
Returns a `GitLabPublisher` for this `PendingGitLabPublisher`,
deleting the `PendingGitLabPublisher` in the process.
"""
maybe_publisher = (
session.query(GitLabPublisher)
.filter(
GitLabPublisher.namespace == self.namespace,
GitLabPublisher.project == self.project,
GitLabPublisher.workflow_filepath == self.workflow_filepath,
GitLabPublisher.environment == self.environment,
GitLabPublisher.issuer_url == self.issuer_url,
)
.one_or_none()
)
publisher = maybe_publisher or GitLabPublisher(
namespace=self.namespace,
project=self.project,
workflow_filepath=self.workflow_filepath,
environment=self.environment,
issuer_url=self.issuer_url,
)
session.delete(self)
return publisher
| PendingGitLabPublisher |
python | ansible__ansible | lib/ansible/_internal/_templating/_lazy_containers.py | {
"start": 2452,
"end": 2645
} | class ____:
"""Wrapper around values to indicate lazy behavior has not yet been applied."""
value: t.Any
@t.final
@dataclasses.dataclass(frozen=True, kw_only=True, slots=True)
| _LazyValue |
python | google__pytype | pytype/compare.py | {
"start": 499,
"end": 10441
} | class ____(Exception):
"""Comparing incompatible primitive constants."""
def _incompatible(left_name, right_name):
"""Incompatible primitive types can never be equal."""
if left_name == right_name:
return False
for group in NUMERIC, STRING:
if left_name in group and right_name in group:
return False
return True
def _is_primitive_constant(ctx, value):
if isinstance(value, abstract.PythonConstant):
return value.pyval.__class__ in ctx.convert.primitive_classes
return False
def _is_primitive(ctx, value):
if _is_primitive_constant(ctx, value):
return True
elif isinstance(value, abstract.Instance):
return value.full_name in ctx.convert.primitive_classes_by_name
return False
def _is_equality_cmp(op):
return op in (slots.EQ, slots.NE)
def _compare_constants(op, left, right):
try:
return slots.COMPARES[op](left, right)
except TypeError as e:
raise CmpTypeError() from e
def _compare_primitive_constant(ctx, op, left, right):
if _is_primitive_constant(ctx, right):
ret = _compare_constants(op, left.pyval, right.pyval)
if ret is not None:
return ret
return _compare_primitive(op, left, right)
def _compare_primitive(op, left, right):
# Determines when primitives are definitely not equal by checking for
# compatibility of their types.
if (
_is_equality_cmp(op)
and isinstance(right, abstract.Instance)
and _incompatible(left.full_name, right.full_name)
):
return op != slots.EQ
return None
def _get_constant_tuple_prefix(value: abstract.Tuple):
"""Given a tuple, get its longest prefix of constant elements."""
elements = []
for element_var in value.pyval:
try:
element = abstract_utils.get_atomic_python_constant(
element_var, tuple(value.ctx.convert.primitive_classes)
)
except abstract_utils.ConversionError:
return tuple(elements)
elements.append(element)
return tuple(elements)
def _compare_constant_tuple_prefix(op, prefix, constant_tuple, reverse):
"""Compares a tuple's constant prefix against a constant tuple.
Args:
op: A comparison operator, such as LT (less than).
prefix: A constant prefix of a non-constant tuple (referred to as "left" in
the inline comments). So if left=(3, 2, ...), prefix=(3, 2).
constant_tuple: A constant tuple (referred to as "right").
reverse: Whether left and right should be reversed for the comparison.
Returns:
A bool of the comparison result if it can be determined, None otherwise.
"""
length = min(len(prefix), len(constant_tuple))
trimmed_prefix = prefix[:length]
trimmed_constant_tuple = constant_tuple[:length]
if trimmed_prefix == trimmed_constant_tuple:
if len(prefix) >= len(constant_tuple):
# right is a strict prefix of left (since left contains at least one
# non-constant element in addition to `prefix`), so left > right.
if reverse:
return op in (slots.LT, slots.LE, slots.NE)
else:
return op in (slots.NE, slots.GE, slots.GT)
# We have something like left=(3, ...), right=(3, 2). We cannot tell how
# they would compare.
return None
# When left and right have non-equal, same-length prefixes, we can compare the
# prefixes to get the comparison results for the full tuples. For example, if
# we have op=LT, left=(3, ...), right=(4, 0), then:
# (3,) < (4,) => (3, ...) < (4, ...) => (3, ...) < (4, 0)
if reverse:
return _compare_constants(op, trimmed_constant_tuple, trimmed_prefix)
else:
return _compare_constants(op, trimmed_prefix, trimmed_constant_tuple)
def _compare_as_constant_tuples(op, left, right):
    """Checks if the values are constant tuples and compares them if so."""
    if not (isinstance(left, abstract.Tuple) and isinstance(right, abstract.Tuple)):
        return None
    # For each tuple, take the longest prefix of constant elements, e.g.
    # Tuple(PythonConstant(2), Instance(int), PythonConstant(3)) -> (2,).
    left_prefix = _get_constant_tuple_prefix(left)
    right_prefix = _get_constant_tuple_prefix(right)
    left_is_constant = len(left_prefix) == len(left.pyval)
    right_is_constant = len(right_prefix) == len(right.pyval)
    if left_is_constant and right_is_constant:
        # Every element on both sides is constant: evaluate the operator
        # natively on the two concrete tuples.
        return _compare_constants(op, left_prefix, right_prefix)
    if left_is_constant:
        # Only `right` has non-constant elements; compare its constant prefix
        # against the fully constant `left` (order reversed).
        return _compare_constant_tuple_prefix(op, right_prefix, left_prefix, True)
    if right_is_constant:
        return _compare_constant_tuple_prefix(op, left_prefix, right_prefix, False)
    # Both tuples contain at least one non-constant element. Comparing prefixes
    # could occasionally be more precise than None, but it is complicated and
    # not needed for the motivating case, `sys.version_info {op} (major, minor)`.
    return None
def _compare_tuple(op, left, right):
    """Compare two abstract tuples, returning a bool when decidable."""
    constant_result = _compare_as_constant_tuples(op, left, right)
    if constant_result is not None:
        return constant_result
    # Tuples of different lengths can never compare equal, so an equality
    # comparison is decidable from the lengths alone.
    if _is_equality_cmp(op) and isinstance(right, abstract.Tuple):
        if left.tuple_length != right.tuple_length:
            return op != slots.EQ
    return None
def _compare_dict(op, left, right):
    """Compare two abstract dicts, returning a bool when decidable."""
    # Only (in)equality can be decided, and only when both dicts are concrete:
    # differing key sets mean the dicts are definitely not equal.
    if not _is_equality_cmp(op):
        return None
    if (
        left.is_concrete
        and abstract_utils.is_concrete_dict(right)
        and set(left.pyval) != set(right.pyval)
    ):
        return op != slots.EQ
    return None
def _compare_class(op, left, right):
    """Compare class objects; ordering plain classes is a type error."""
    del right  # unused
    # A custom metaclass may define ordering, so nothing can be decided.
    if left.cls.full_name != "builtins.type":
        return None
    # Equality comparisons on classes are always permitted (result unknown).
    if not _is_equality_cmp(op):
        # Classes whose metaclass is plain `type` are not orderable.
        raise CmpTypeError()
    return None
def _compare_sequence_length(
    op, left: abstract.SequenceLength, right: abstract.ConcreteValue
):
    """Compare sequence lengths for pattern matching.

    Args:
      op: The comparison operator; only EQ and GE are expected here.
      left: The abstract length of the matched sequence. `left.splat` means the
        pattern contains a `*rest` capture, so `left.length` is only a lower
        bound on the actual sequence length.
      right: A concrete integer length to compare against (`right.pyval`).

    Returns:
      True/False when the comparison is decidable, or None when the splat makes
      the outcome ambiguous.

    Raises:
      AssertionError: If `op` is not EQ or GE.
    """
    assert isinstance(right, abstract.ConcreteValue)
    if op == slots.EQ:
        if left.splat:
            # A splat pattern matches any length >= left.length, so equality is
            # decidable (False) only when the sequence is definitely too short.
            return None if left.length <= right.pyval else False
        else:
            return left.length == right.pyval
    elif op == slots.GE:
        if left.splat:
            # The splat only guarantees a lower bound, so GE is decidable (True)
            # only when that bound already satisfies it.
            return True if left.length >= right.pyval else None
        else:
            return left.length >= right.pyval
    else:
        # Bug fix: was `assert False, op`, which is stripped under `python -O`
        # and would silently fall through returning None. Raise explicitly so
        # unexpected operators always fail loudly.
        raise AssertionError(op)
def cmp_rel(ctx, op, left, right):
    """Compare two variables."""
    # Dispatch on the kind of `left`. The order matters: a primitive constant
    # must be recognized before the generic primitive case.
    if _is_primitive_constant(ctx, left):
        return _compare_primitive_constant(ctx, op, left, right)
    if _is_primitive(ctx, left) and _is_primitive(ctx, right):
        return _compare_primitive(op, left, right)
    if isinstance(left, abstract.Tuple):
        return _compare_tuple(op, left, right)
    if isinstance(left, abstract.Dict):
        return _compare_dict(op, left, right)
    if isinstance(left, abstract.Class):
        return _compare_class(op, left, right)
    if isinstance(left, abstract.SequenceLength):
        return _compare_sequence_length(op, left, right)
    # Unknown kind: the comparison result cannot be determined.
    return None
def compatible_with(value, logical_value):
    """Returns the conditions under which the value could be True or False.

    Args:
      value: An abstract value.
      logical_value: Either True or False.

    Returns:
      False: If the value could not evaluate to logical_value under any
        circumstance (e.g. value is the empty list and logical_value is True).
      True: If it is possible for the value to evaluate to the logical_value,
        and any ambiguity cannot be resolved by additional bindings.
    """
    if isinstance(value, abstract.List) and not value.is_concrete:
        # A non-concrete list has unknown contents, so either truthiness is
        # possible.
        return True
    elif isinstance(value, abstract.Dict) and not value.is_concrete:
        # Always compatible with False. Compatible with True only if type
        # parameters have been established (meaning that the dict can be
        # non-empty).
        return not logical_value or bool(
            value.get_instance_type_parameter(abstract_utils.K).bindings
        )
    elif isinstance(value, abstract.LazyConcreteDict):
        # An empty lazy dict can only be falsy; a non-empty one only truthy.
        return value.is_empty() != logical_value
    elif isinstance(value, abstract.PythonConstant):
        # A concrete constant has a definite truthiness.
        return bool(value.pyval) == logical_value
    elif isinstance(value, abstract.Instance):
        name = value.full_name
        if logical_value and name in _CONTAINER_NAMES:
            # Containers with unset parameters cannot match True.
            ret = value.has_instance_type_parameter(abstract_utils.T) and bool(
                value.get_instance_type_parameter(abstract_utils.T).bindings
            )
            return ret
        elif name == "builtins.NoneType":
            # NoneType instances cannot match True.
            return not logical_value
        elif name in NUMERIC:
            # Numeric types can match both True and False
            return True
        elif isinstance(value.cls, abstract.Class) and not value.cls.overrides_bool:
            if getattr(value.cls, "template", None):
                # A parameterized class can match both True and False, since it might be
                # an empty container.
                return True
            # Objects evaluate to True unless explicitly overridden.
            return logical_value
        return True
    elif isinstance(value, (abstract.Function, abstract.Class)):
        # Functions and classes always evaluate to True.
        return logical_value
    else:
        # By default a value is ambiguous - it could potentially evaluate to either
        # True or False. Thus we return True here regardless of logical_value.
        return True
def compatible_with_none(value):
    """Return True if `value` could be None (NoneType, ambiguous, or a type var)."""
    if value.full_name == "builtins.NoneType":
        return True
    # Ambiguous/empty values and type-parameter instances might bind to None.
    possibly_none_kinds = (
        abstract.AMBIGUOUS_OR_EMPTY,
        abstract.TypeParameterInstance,
        abstract.ParamSpecInstance,
    )
    return isinstance(value, possibly_none_kinds)
| CmpTypeError |
python | plotly__plotly.py | plotly/graph_objs/mesh3d/_lightposition.py | {
"start": 233,
"end": 3489
class ____(_BaseTraceHierarchyType):
    # Generated hierarchy type for mesh3d light position. Property access is
    # routed through the base class via self["..."] item syntax.

    # Location of this node within the figure property hierarchy.
    _parent_path_str = "mesh3d"
    _path_str = "mesh3d.lightposition"
    _valid_props = {"x", "y", "z"}

    @property
    def x(self):
        """
        Numeric vector, representing the X coordinate for each vertex.

        The 'x' property is a number and may be specified as:
          - An int or float in the interval [-100000, 100000]

        Returns
        -------
        int|float
        """
        return self["x"]

    @x.setter
    def x(self, val):
        self["x"] = val

    @property
    def y(self):
        """
        Numeric vector, representing the Y coordinate for each vertex.

        The 'y' property is a number and may be specified as:
          - An int or float in the interval [-100000, 100000]

        Returns
        -------
        int|float
        """
        return self["y"]

    @y.setter
    def y(self, val):
        self["y"] = val

    @property
    def z(self):
        """
        Numeric vector, representing the Z coordinate for each vertex.

        The 'z' property is a number and may be specified as:
          - An int or float in the interval [-100000, 100000]

        Returns
        -------
        int|float
        """
        return self["z"]

    @z.setter
    def z(self, val):
        self["z"] = val

    @property
    def _prop_descriptions(self):
        return """\
        x
            Numeric vector, representing the X coordinate for each
            vertex.
        y
            Numeric vector, representing the Y coordinate for each
            vertex.
        z
            Numeric vector, representing the Z coordinate for each
            vertex.
        """

    def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
        """
        Construct a new Lightposition object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.mesh3d.Lightposition`
        x
            Numeric vector, representing the X coordinate for each
            vertex.
        y
            Numeric vector, representing the Y coordinate for each
            vertex.
        z
            Numeric vector, representing the Z coordinate for each
            vertex.

        Returns
        -------
        Lightposition
        """
        super().__init__("lightposition")
        # Internal construction path: adopt the given parent and skip the
        # normal argument processing entirely.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize `arg` into a plain dict of property values.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.mesh3d.Lightposition
constructor must be a dict or
an instance of :class:`plotly.graph_objs.mesh3d.Lightposition`""")

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Explicit keyword arguments take precedence over values from `arg`.
        self._set_property("x", arg, x)
        self._set_property("y", arg, y)
        self._set_property("z", arg, z)

        # Process any remaining properties; invalid ones raise unless
        # skip_invalid was requested. Reset the flag afterwards.
        self._process_kwargs(**dict(arg, **kwargs))
        self._skip_invalid = False
| Lightposition |
python | doocs__leetcode | solution/3300-3399/3353.Minimum Total Operations/Solution.py | {
"start": 0,
"end": 123
} | class ____:
def minOperations(self, nums: List[int]) -> int:
return sum(x != y for x, y in pairwise(nums))
| Solution |
python | pytorch__pytorch | test/distributed/_composable/fsdp/test_fully_shard_comm.py | {
"start": 23632,
"end": 63747
} | class ____(FSDPTest):
@property
def world_size(self) -> int:
return min(4, torch.get_device_module(device_type).device_count())
@skip_if_lt_x_gpu(2)
def test_fully_shard_backward_prefetch(self):
# Activation checkpointing should not affect the expected FSDP events
self.run_subtests(
{
"reshard_after_forward": [True, False, 2, None],
"checkpoint_impl": [None, "utils", "composable"],
},
self._test_backward_prefetch_forward_backward,
)
self.run_subtests(
{
"reshard_after_forward": [True, False, 2, None],
"checkpoint_impl": [None, "utils", "composable"],
},
self._test_backward_prefetch_multi_forward,
)
self._test_backward_prefetch_unused_in_backward(True)
def _test_backward_prefetch_forward_backward(
self,
reshard_after_forward: Union[bool, int, None],
checkpoint_impl: Optional[str],
):
n_layers = 3
model, optim, inp = self._init_transformer(
n_layers, reshard_after_forward, checkpoint_impl
)
events: list[EventType] = []
unshard_with_record = self._get_unshard_with_record(
FSDPParamGroup.unshard, events
)
post_backward_with_record = self._get_post_backward_with_record(
FSDPParamGroup.post_backward, events
)
# Check the order for normal 1 forward, 1 backward, 1 optimizer step
with (
patch_unshard(unshard_with_record),
patch_post_backward(post_backward_with_record),
):
for iter_idx in range(3):
loss = model(inp)
expected_events = [
("unshard", "", TrainingState.FORWARD), # root
("unshard", "layers.0", TrainingState.FORWARD),
("unshard", "layers.1", TrainingState.FORWARD),
("unshard", "layers.2", TrainingState.FORWARD),
]
self.assertEqual(events, expected_events)
events.clear()
loss.sum().backward()
expected_events = []
# Root does not reshard after forward so there is no
# unshard event for it in backward
if reshard_after_forward is not None:
expected_events.append(("unshard", "", TrainingState.PRE_BACKWARD))
expected_events.extend(
[
("unshard", "layers.2", TrainingState.PRE_BACKWARD),
# Explicit backward prefetching moves the unshards early
# by one module (note how swapping each unshard down one
# event would give the natural event order)
("unshard", "layers.1", TrainingState.PRE_BACKWARD),
("post_backward", "layers.2", TrainingState.POST_BACKWARD),
("unshard", "layers.0", TrainingState.PRE_BACKWARD),
("post_backward", "layers.1", TrainingState.POST_BACKWARD),
("post_backward", "layers.0", TrainingState.POST_BACKWARD),
("post_backward", "", TrainingState.POST_BACKWARD),
]
)
if reshard_after_forward is False:
# No reshard after forward means no backward unshards
expected_events = [e for e in expected_events if e[0] != "unshard"]
self.assertEqual(events, expected_events)
events.clear()
optim.step()
optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
def _test_backward_prefetch_multi_forward(
self, reshard_after_forward: Union[bool, int], checkpoint_impl: Optional[str]
):
n_layers = 3
model, _, inp = self._init_transformer(
n_layers, reshard_after_forward, checkpoint_impl
)
events: list[EventType] = []
unshard_with_record = self._get_unshard_with_record(
FSDPParamGroup.unshard, events
)
post_backward_with_record = self._get_post_backward_with_record(
FSDPParamGroup.post_backward, events
)
# Check the order for multiple forwards before 1 backward
with (
patch_unshard(unshard_with_record),
patch_post_backward(post_backward_with_record),
):
loss1 = model(inp)
loss2 = model(inp)
expected_events = [
("unshard", "", TrainingState.FORWARD), # root
("unshard", "layers.0", TrainingState.FORWARD),
("unshard", "layers.1", TrainingState.FORWARD),
("unshard", "layers.2", TrainingState.FORWARD),
]
if reshard_after_forward is not None:
expected_events.append(("unshard", "", TrainingState.FORWARD))
expected_events.extend(
[
("unshard", "layers.0", TrainingState.FORWARD),
("unshard", "layers.1", TrainingState.FORWARD),
("unshard", "layers.2", TrainingState.FORWARD),
]
)
if reshard_after_forward is False:
# No reshard after forward means no second set of unshards
expected_events = expected_events[:-4]
self.assertEqual(events, expected_events)
events.clear()
(loss1 + loss2).sum().backward()
expected_events = []
if reshard_after_forward is not None:
expected_events.append(("unshard", "", TrainingState.PRE_BACKWARD))
expected_events.extend(
[
# Same as the single forward/backward case except the root's
# post-backward does not run until the end of backward in the
# final callback (since the input not requiring gradient means
# that we do not have a tensor on which to hook for
# post-backward)
("unshard", "layers.2", TrainingState.PRE_BACKWARD),
("unshard", "layers.1", TrainingState.PRE_BACKWARD),
("post_backward", "layers.2", TrainingState.POST_BACKWARD),
("unshard", "layers.0", TrainingState.PRE_BACKWARD),
("post_backward", "layers.1", TrainingState.POST_BACKWARD),
("post_backward", "layers.0", TrainingState.POST_BACKWARD),
]
)
if reshard_after_forward is False:
# No reshard after forward means no backward unshards
expected_events = [e for e in expected_events if e[0] != "unshard"]
# However, the post-backward reshards, so the second set of
# unshards will run as real ops
expected_events += [
# Repeat the same pattern except with the root's post-backward
# at the end since the final callback runs
("unshard", "layers.2", TrainingState.PRE_BACKWARD),
("unshard", "layers.1", TrainingState.PRE_BACKWARD),
("post_backward", "layers.2", TrainingState.POST_BACKWARD),
("unshard", "layers.0", TrainingState.PRE_BACKWARD),
("post_backward", "layers.1", TrainingState.POST_BACKWARD),
("post_backward", "layers.0", TrainingState.POST_BACKWARD),
("post_backward", "", TrainingState.POST_BACKWARD),
]
self.assertEqual(events, expected_events)
events.clear()
def _test_backward_prefetch_unused_in_backward(
self, reshard_after_forward: Union[bool, int, None]
):
"""
Test a model with a linear module then a split into two linear modules,
where we run backward through one path first before the other, meaning
that (1) only one linear of the two split is used per backward and (2)
the initial shared linear is used in both backwards.
"""
dim = 8
model = nn.Sequential(nn.Linear(dim, dim), DoubleLinear(dim))
fully_shard(model[0], reshard_after_forward=reshard_after_forward)
fully_shard(model[1].lin1, reshard_after_forward=reshard_after_forward)
fully_shard(model[1].lin2, reshard_after_forward=reshard_after_forward)
fully_shard(model, reshard_after_forward=reshard_after_forward)
inp = torch.randn((4, dim), device=device_type.type)
events: list[EventType] = []
unshard_with_record = self._get_unshard_with_record(
FSDPParamGroup.unshard, events
)
post_backward_with_record = self._get_post_backward_with_record(
FSDPParamGroup.post_backward, events
)
with (
patch_unshard(unshard_with_record),
patch_post_backward(post_backward_with_record),
):
loss1, loss2 = model(inp)
expected_events = [
# Root has no parameters, so it does not have an unshard
("unshard", "0", TrainingState.FORWARD),
("unshard", "1.lin1", TrainingState.FORWARD),
("unshard", "1.lin2", TrainingState.FORWARD),
]
self.assertEqual(events, expected_events)
events.clear()
model.set_is_last_backward(False)
loss2.sum().backward(retain_graph=True)
expected_events = [
("unshard", "1.lin2", TrainingState.PRE_BACKWARD),
# NOTE: This `1.lin1` unshard is a mistargeted prefetch.
("unshard", "1.lin1", TrainingState.PRE_BACKWARD),
("post_backward", "1.lin2", TrainingState.POST_BACKWARD),
("unshard", "0", TrainingState.PRE_BACKWARD),
("post_backward", "0", TrainingState.POST_BACKWARD),
# `1.lin1` post-backward hook runs but is a no-op
("post_backward", "1.lin1", TrainingState.POST_BACKWARD),
]
self.assertEqual(events, expected_events)
events.clear()
model.set_is_last_backward(True)
loss1.sum().backward()
expected_events = [
# NOTE: `1.lin1` is already unsharded from the mistargeted
# prefetch in the first backward.
# Prefetch `0`
("unshard", "0", TrainingState.PRE_BACKWARD),
("post_backward", "1.lin1", TrainingState.POST_BACKWARD),
("post_backward", "0", TrainingState.POST_BACKWARD),
# `1.lin2` post-backward hook runs but is a no-op
("post_backward", "1.lin2", TrainingState.POST_BACKWARD),
]
self.assertEqual(events, expected_events)
events.clear()
@skip_if_lt_x_gpu(2)
def test_set_modules_to_forward_prefetch(self):
n_layers = 4
reshard_after_forward = True
checkpoint_impl = "utils"
model, _, inp = self._init_transformer(
n_layers, reshard_after_forward, checkpoint_impl
)
def set_forward_prefetch(model: Transformer, num_to_prefetch: int) -> None:
# Use model-specific knowledge to configure forward prefetching:
# each transformer block (layer) prefetches for the next few
for i, layer in enumerate(model.layers):
if i >= len(model.layers) - num_to_prefetch:
break
layers_to_prefetch = [
model.layers[i + j] for j in range(1, num_to_prefetch + 1)
]
layer.set_modules_to_forward_prefetch(layers_to_prefetch)
events: list[EventType] = []
unshard_with_record = self._get_unshard_with_record(
FSDPParamGroup.unshard, events
)
reshard_with_record = self._get_reshard_with_record(
FSDPParamGroup.reshard, events
)
post_backward_with_record = self._get_post_backward_with_record(
FSDPParamGroup.post_backward, events
)
expected_backward_events = [
# Default backward prefetching
("unshard", "", TrainingState.PRE_BACKWARD),
("unshard", "layers.3", TrainingState.PRE_BACKWARD),
("unshard", "layers.2", TrainingState.PRE_BACKWARD),
("reshard", "layers.3", TrainingState.POST_BACKWARD),
("post_backward", "layers.3", TrainingState.POST_BACKWARD),
("unshard", "layers.1", TrainingState.PRE_BACKWARD),
("reshard", "layers.2", TrainingState.POST_BACKWARD),
("post_backward", "layers.2", TrainingState.POST_BACKWARD),
("unshard", "layers.0", TrainingState.PRE_BACKWARD),
("reshard", "layers.1", TrainingState.POST_BACKWARD),
("post_backward", "layers.1", TrainingState.POST_BACKWARD),
("reshard", "layers.0", TrainingState.POST_BACKWARD),
("post_backward", "layers.0", TrainingState.POST_BACKWARD),
("reshard", "", TrainingState.POST_BACKWARD),
("post_backward", "", TrainingState.POST_BACKWARD),
]
with (
patch_unshard(unshard_with_record),
patch_reshard(reshard_with_record),
patch_post_backward(post_backward_with_record),
):
set_forward_prefetch(model, num_to_prefetch=1)
loss = model(inp)
expected_forward_events = [
("unshard", "", TrainingState.FORWARD),
# `layers.i` prefetches `layers.i+1`
("unshard", "layers.0", TrainingState.FORWARD),
("unshard", "layers.1", TrainingState.FORWARD),
("reshard", "layers.0", TrainingState.FORWARD),
("unshard", "layers.2", TrainingState.FORWARD),
("reshard", "layers.1", TrainingState.FORWARD),
("unshard", "layers.3", TrainingState.FORWARD),
("reshard", "layers.2", TrainingState.FORWARD),
("reshard", "layers.3", TrainingState.FORWARD),
("reshard", "", TrainingState.FORWARD),
]
self.assertEqual(events, expected_forward_events)
events.clear()
loss.sum().backward()
self.assertEqual(events, expected_backward_events)
events.clear()
set_forward_prefetch(model, num_to_prefetch=2)
loss = model(inp)
expected_forward_events = [
("unshard", "", TrainingState.FORWARD),
# `layers.i` prefetches `layers.i+1` and `layers.i+2`
("unshard", "layers.0", TrainingState.FORWARD),
("unshard", "layers.1", TrainingState.FORWARD),
("unshard", "layers.2", TrainingState.FORWARD),
("reshard", "layers.0", TrainingState.FORWARD),
("unshard", "layers.3", TrainingState.FORWARD),
("reshard", "layers.1", TrainingState.FORWARD),
("reshard", "layers.2", TrainingState.FORWARD),
("reshard", "layers.3", TrainingState.FORWARD),
("reshard", "", TrainingState.FORWARD),
]
self.assertEqual(events, expected_forward_events)
events.clear()
loss.sum().backward()
self.assertEqual(events, expected_backward_events)
events.clear()
@skip_if_lt_x_gpu(2)
def test_set_modules_to_backward_prefetch(self):
n_layers = 4
reshard_after_forward = True
checkpoint_impl = "utils"
model, _, inp = self._init_transformer(
n_layers, reshard_after_forward, checkpoint_impl
)
def set_backward_prefetch(model: Transformer, num_to_prefetch: int) -> None:
# Use model-specific knowledge to configure backward prefetching:
# each transformer block (layer) prefetches for the previous few
for i, layer in enumerate(model.layers):
if i < num_to_prefetch:
continue
layers_to_prefetch = [
model.layers[i - j] for j in range(1, num_to_prefetch + 1)
]
layer.set_modules_to_backward_prefetch(layers_to_prefetch)
events: list[EventType] = []
unshard_with_record = self._get_unshard_with_record(
FSDPParamGroup.unshard, events
)
reshard_with_record = self._get_reshard_with_record(
FSDPParamGroup.reshard, events
)
post_backward_with_record = self._get_post_backward_with_record(
FSDPParamGroup.post_backward, events
)
expected_forward_events = [
# Default forward prefetching
("unshard", "", TrainingState.FORWARD), # root
("unshard", "layers.0", TrainingState.FORWARD),
("reshard", "layers.0", TrainingState.FORWARD),
("unshard", "layers.1", TrainingState.FORWARD),
("reshard", "layers.1", TrainingState.FORWARD),
("unshard", "layers.2", TrainingState.FORWARD),
("reshard", "layers.2", TrainingState.FORWARD),
("unshard", "layers.3", TrainingState.FORWARD),
("reshard", "layers.3", TrainingState.FORWARD),
("reshard", "", TrainingState.FORWARD),
]
with (
patch_unshard(unshard_with_record),
patch_reshard(reshard_with_record),
patch_post_backward(post_backward_with_record),
):
set_backward_prefetch(model, num_to_prefetch=1)
loss = model(inp)
self.assertEqual(events, expected_forward_events)
events.clear()
loss.sum().backward()
expected_backward_events = [
("unshard", "", TrainingState.PRE_BACKWARD),
# Root prefetches `layers.3` per default
("unshard", "layers.3", TrainingState.PRE_BACKWARD),
# `layers.i` prefetches for `layers.i-1` (same as default)
("unshard", "layers.2", TrainingState.PRE_BACKWARD),
("reshard", "layers.3", TrainingState.POST_BACKWARD),
("post_backward", "layers.3", TrainingState.POST_BACKWARD),
("unshard", "layers.1", TrainingState.PRE_BACKWARD),
("reshard", "layers.2", TrainingState.POST_BACKWARD),
("post_backward", "layers.2", TrainingState.POST_BACKWARD),
("unshard", "layers.0", TrainingState.PRE_BACKWARD),
("reshard", "layers.1", TrainingState.POST_BACKWARD),
("post_backward", "layers.1", TrainingState.POST_BACKWARD),
("reshard", "layers.0", TrainingState.POST_BACKWARD),
("post_backward", "layers.0", TrainingState.POST_BACKWARD),
("reshard", "", TrainingState.POST_BACKWARD),
("post_backward", "", TrainingState.POST_BACKWARD),
]
self.assertEqual(events, expected_backward_events)
events.clear()
set_backward_prefetch(model, num_to_prefetch=2)
loss = model(inp)
self.assertEqual(events, expected_forward_events)
events.clear()
loss.sum().backward()
expected_backward_events = [
("unshard", "", TrainingState.PRE_BACKWARD),
# Root prefetches `layers.3` per default
("unshard", "layers.3", TrainingState.PRE_BACKWARD),
# `layers.i` prefetches for `layers.i-1` and `layers.i-2`
("unshard", "layers.2", TrainingState.PRE_BACKWARD),
("unshard", "layers.1", TrainingState.PRE_BACKWARD),
("reshard", "layers.3", TrainingState.POST_BACKWARD),
("post_backward", "layers.3", TrainingState.POST_BACKWARD),
("unshard", "layers.0", TrainingState.PRE_BACKWARD),
("reshard", "layers.2", TrainingState.POST_BACKWARD),
("post_backward", "layers.2", TrainingState.POST_BACKWARD),
("reshard", "layers.1", TrainingState.POST_BACKWARD),
("post_backward", "layers.1", TrainingState.POST_BACKWARD),
("reshard", "layers.0", TrainingState.POST_BACKWARD),
("post_backward", "layers.0", TrainingState.POST_BACKWARD),
("reshard", "", TrainingState.POST_BACKWARD),
("post_backward", "", TrainingState.POST_BACKWARD),
]
self.assertEqual(events, expected_backward_events)
events.clear()
@skip_if_lt_x_gpu(2)
def test_set_modules_to_backward_prefetch_inside_ac(self):
n_layers = 3
reshard_after_forward = True
# use checkpoint wrapper instead of torch.utils
model_args = ModelArgs(n_layers=n_layers, checkpoint_activations=False)
model = Transformer(model_args)
apply_activation_checkpointing(
model, check_fn=lambda m: isinstance(m, TransformerBlock)
)
apply_activation_checkpointing(
model, check_fn=lambda m: isinstance(m, FeedForward)
)
fully_shard([model.tok_embeddings, model.pos_embeddings])
for layer in model.layers:
# mimic fully_shard(layer.moe.experts)
fully_shard(
layer.feed_forward.w1, reshard_after_forward=reshard_after_forward
)
fully_shard(layer, reshard_after_forward=reshard_after_forward)
fully_shard(
[model.norm, model.output], reshard_after_forward=reshard_after_forward
)
fully_shard(model, reshard_after_forward=reshard_after_forward)
inp = torch.randint(
0,
model_args.vocab_size,
(2, model_args.max_seq_len),
device=device_type.type,
)
def set_backward_prefetch(model: Transformer) -> None:
# tell pyre model.set_modules_to_backward_prefetch is available
assert isinstance(model, FSDPModule)
assert isinstance(model.output, FSDPModule)
# mimic deepseek MOE
# prefetch layer - 1 and its feedforward before cpu sync during a2a
reversed_transformer_blocks = list(reversed(model.layers))
prev_transformer_blocks = reversed_transformer_blocks[1:] + [None]
if (
model.norm is not None
and model.output is not None
and len(model.layers) > 0
):
assert isinstance(reversed_transformer_blocks[0], FSDPModule)
model.output.set_modules_to_backward_prefetch(
[reversed_transformer_blocks[0]]
)
for transformer_block, prev_transformer_block in zip(
reversed_transformer_blocks, prev_transformer_blocks
):
assert isinstance(transformer_block, FSDPModule)
if prev_transformer_block is not None:
assert isinstance(prev_transformer_block, FSDPModule)
assert hasattr(prev_transformer_block.feed_forward, "w1")
assert isinstance(
prev_transformer_block.feed_forward.w1, FSDPModule
)
transformer_block.set_modules_to_backward_prefetch(
[
prev_transformer_block,
prev_transformer_block.feed_forward.w1,
]
)
elif model.tok_embeddings is not None:
assert isinstance(model.tok_embeddings, FSDPModule)
transformer_block.set_modules_to_backward_prefetch(
[model.tok_embeddings]
)
events: list[EventType] = []
unshard_with_record = self._get_unshard_with_record(
FSDPParamGroup.unshard, events
)
reshard_with_record = self._get_reshard_with_record(
FSDPParamGroup.reshard, events
)
with (
patch_unshard(unshard_with_record),
patch_reshard(reshard_with_record),
):
loss = model(inp)
events.clear()
loss.sum().backward()
expected_backward_events = [
("unshard", "norm, output", TrainingState.PRE_BACKWARD),
("unshard", "layers.2", TrainingState.PRE_BACKWARD),
("reshard", "norm, output", TrainingState.POST_BACKWARD),
# layers.2 prefetch w1
(
"unshard",
"layers.2._checkpoint_wrapped_module.feed_forward._checkpoint_wrapped_module.w1",
TrainingState.PRE_BACKWARD,
),
# layers.2.w1 prefetch layers.1
("unshard", "layers.1", TrainingState.PRE_BACKWARD),
(
"reshard",
"layers.2._checkpoint_wrapped_module.feed_forward._checkpoint_wrapped_module.w1",
TrainingState.POST_BACKWARD,
),
("reshard", "layers.2", TrainingState.POST_BACKWARD),
(
"unshard",
"layers.1._checkpoint_wrapped_module.feed_forward._checkpoint_wrapped_module.w1",
TrainingState.PRE_BACKWARD,
),
("unshard", "layers.0", TrainingState.PRE_BACKWARD),
(
"reshard",
"layers.1._checkpoint_wrapped_module.feed_forward._checkpoint_wrapped_module.w1",
TrainingState.POST_BACKWARD,
),
("reshard", "layers.1", TrainingState.POST_BACKWARD),
(
"unshard",
"layers.0._checkpoint_wrapped_module.feed_forward._checkpoint_wrapped_module.w1",
TrainingState.PRE_BACKWARD,
),
(
"unshard",
"tok_embeddings, pos_embeddings",
TrainingState.PRE_BACKWARD,
),
(
"reshard",
"layers.0._checkpoint_wrapped_module.feed_forward._checkpoint_wrapped_module.w1",
TrainingState.POST_BACKWARD,
),
("reshard", "layers.0", TrainingState.POST_BACKWARD),
(
"reshard",
"tok_embeddings, pos_embeddings",
TrainingState.POST_BACKWARD,
),
(
"reshard",
"tok_embeddings, pos_embeddings",
TrainingState.POST_BACKWARD,
),
("reshard", "norm, output", TrainingState.POST_BACKWARD),
]
self.assertEqual(events, expected_backward_events)
events.clear()
set_backward_prefetch(model)
loss = model(inp)
events.clear()
loss.sum().backward()
expected_backward_events = [
("unshard", "norm, output", TrainingState.PRE_BACKWARD),
# root explicit prefetch layers.2
("unshard", "layers.2", TrainingState.PRE_BACKWARD),
("reshard", "norm, output", TrainingState.POST_BACKWARD),
# layers.2 prefetch layers.1 and feed_forward
("unshard", "layers.1", TrainingState.PRE_BACKWARD),
(
"unshard",
"layers.1._checkpoint_wrapped_module.feed_forward._checkpoint_wrapped_module.w1",
TrainingState.PRE_BACKWARD,
),
# AC recompute_fn
(
"unshard",
"layers.2._checkpoint_wrapped_module.feed_forward._checkpoint_wrapped_module.w1",
TrainingState.FORWARD,
),
(
"reshard",
"layers.2._checkpoint_wrapped_module.feed_forward._checkpoint_wrapped_module.w1",
TrainingState.POST_BACKWARD,
),
("reshard", "layers.2", TrainingState.POST_BACKWARD),
# layers.1 prefetch layers.0
("unshard", "layers.0", TrainingState.PRE_BACKWARD),
(
"unshard",
"layers.0._checkpoint_wrapped_module.feed_forward._checkpoint_wrapped_module.w1",
TrainingState.PRE_BACKWARD,
),
(
"reshard",
"layers.1._checkpoint_wrapped_module.feed_forward._checkpoint_wrapped_module.w1",
TrainingState.POST_BACKWARD,
),
("reshard", "layers.1", TrainingState.POST_BACKWARD),
# layers.0 prefetch embeddings
(
"unshard",
"tok_embeddings, pos_embeddings",
TrainingState.PRE_BACKWARD,
),
(
"reshard",
"layers.0._checkpoint_wrapped_module.feed_forward._checkpoint_wrapped_module.w1",
TrainingState.POST_BACKWARD,
),
("reshard", "layers.0", TrainingState.POST_BACKWARD),
(
"reshard",
"tok_embeddings, pos_embeddings",
TrainingState.POST_BACKWARD,
),
(
"reshard",
"tok_embeddings, pos_embeddings",
TrainingState.POST_BACKWARD,
),
("reshard", "norm, output", TrainingState.POST_BACKWARD),
]
self.assertEqual(events, expected_backward_events)
events.clear()
@skip_if_lt_x_gpu(2)
def test_fully_shard_multi_module_backward_prefetch(self):
n_layers = 5
model_args = ModelArgs(n_layers=n_layers, checkpoint_activations=True)
model = Transformer(model_args)
for i in range(n_layers):
if i == 0:
fully_shard(model.layers[i])
elif i % 2 == 1:
fully_shard([model.layers[i], model.layers[i + 1]])
fully_shard([model.tok_embeddings, model.pos_embeddings])
fully_shard([model.norm, model.output], reshard_after_forward=False)
fully_shard(model)
optim = torch.optim.AdamW(model.parameters(), lr=1e-2)
events: list[EventType] = []
unshard_with_record = self._get_unshard_with_record(
FSDPParamGroup.unshard, events
)
post_backward_with_record = self._get_post_backward_with_record(
FSDPParamGroup.post_backward, events
)
inp = torch.randint(
0,
model_args.vocab_size,
(2, model_args.max_seq_len),
device=device_type.type,
)
with (
patch_unshard(unshard_with_record),
patch_post_backward(post_backward_with_record),
):
for _ in range(3):
loss = model(inp)
expected_events = [
(
"unshard",
"tok_embeddings, pos_embeddings",
TrainingState.FORWARD,
),
("unshard", "layers.0", TrainingState.FORWARD),
("unshard", "layers.1, layers.2", TrainingState.FORWARD),
("unshard", "layers.3, layers.4", TrainingState.FORWARD),
("unshard", "norm, output", TrainingState.FORWARD),
]
self.assertEqual(events, expected_events)
events.clear()
loss.sum().backward()
expected_events = [
# (norm, output) does not reshard after forward, so there is
# no unshard to begin backward
("unshard", "layers.3, layers.4", TrainingState.PRE_BACKWARD),
("post_backward", "norm, output", TrainingState.POST_BACKWARD),
("unshard", "layers.1, layers.2", TrainingState.PRE_BACKWARD),
(
"post_backward",
"layers.3, layers.4",
TrainingState.POST_BACKWARD,
),
("unshard", "layers.0", TrainingState.PRE_BACKWARD),
(
"post_backward",
"layers.1, layers.2",
TrainingState.POST_BACKWARD,
),
(
"unshard",
"tok_embeddings, pos_embeddings",
TrainingState.PRE_BACKWARD,
),
("post_backward", "layers.0", TrainingState.POST_BACKWARD),
(
"post_backward",
"tok_embeddings, pos_embeddings",
TrainingState.POST_BACKWARD,
),
]
events.clear()
optim.step()
optim.zero_grad()
    @skip_if_lt_x_gpu(2)
    def test_fully_shard_multi_module_unused_module(self):
        """Checks communication ordering when a sharded group contains a
        submodule (``unused_lin``) whose forward never runs: such groups do
        not reshard after forward even with ``reshard_after_forward=True``,
        so backward must issue no unshards for them."""

        class ModuleWithUnusedLinear(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.unused_lin = nn.Linear(1, 1)
                self.lin = nn.Linear(16, 16)

            def forward(self, x: torch.Tensor) -> torch.Tensor:
                # ``unused_lin`` is intentionally never called in forward.
                return nn.functional.relu(self.lin(x))

        model = nn.Sequential(
            ModuleWithUnusedLinear(), ModuleWithUnusedLinear(), nn.Linear(16, 16)
        )
        # Group each (unused_lin, lin) pair together; the unused member keeps
        # the whole group from resharding after forward.
        fully_shard([model[0].unused_lin, model[0].lin], reshard_after_forward=True)
        fully_shard([model[1].unused_lin, model[1].lin], reshard_after_forward=True)
        fully_shard(model)
        optim = torch.optim.AdamW(model.parameters(), lr=1e-2)

        # Record the order of unshard/post-backward calls via patched hooks.
        events: list[EventType] = []
        unshard_with_record = self._get_unshard_with_record(
            FSDPParamGroup.unshard, events
        )
        post_backward_with_record = self._get_post_backward_with_record(
            FSDPParamGroup.post_backward, events
        )
        inp = torch.randn((2, 16), device=device_type.type)
        with (
            patch_unshard(unshard_with_record),
            patch_post_backward(post_backward_with_record),
        ):
            for _ in range(3):
                loss = model(inp)
                expected_events = [
                    ("unshard", "", TrainingState.FORWARD),
                    ("unshard", "0.unused_lin, 0.lin", TrainingState.FORWARD),
                    ("unshard", "1.unused_lin, 1.lin", TrainingState.FORWARD),
                ]
                self.assertEqual(events, expected_events)
                events.clear()
                loss.sum().backward()
                expected_events = [
                    # Since both `model[0]` and `model[1]` have unused modules
                    # that never ran forward, they do not reshard after forward
                    # despite setting it to `True`. Check that there are no
                    # unshards in backward.
                    (
                        "post_backward",
                        "1.unused_lin, 1.lin",
                        TrainingState.POST_BACKWARD,
                    ),
                    (
                        "post_backward",
                        "0.unused_lin, 0.lin",
                        TrainingState.POST_BACKWARD,
                    ),
                    ("post_backward", "", TrainingState.POST_BACKWARD),
                ]
                # NOTE(review): ``expected_events`` is built here but never
                # compared against ``events`` before the clear below — this
                # looks like a missing ``self.assertEqual(events,
                # expected_events)``; confirm against upstream.
                events.clear()
                optim.step()
                optim.zero_grad()
    @skip_if_lt_x_gpu(2)
    def test_backward_misprefetch(self):
        """Numeric parity with a replicated reference model must hold even
        when the user configures a counterproductive backward prefetch."""
        torch.manual_seed(42)
        model = MLP(dim=16, device=device_type)
        ref_model = copy.deepcopy(model)
        ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
        fully_shard(model.in_proj)
        fully_shard(model.out_proj)
        fully_shard(model)
        optim = torch.optim.Adam(model.parameters(), lr=1e-2)
        # Backward should run through `out_proj` -> `in_proj`, so if `in_proj`
        # prefetches for `out_proj`, then this is a misprefetch, as `out_proj`
        # should not be needed anymore for backward.
        model.in_proj.set_modules_to_backward_prefetch([model.out_proj])

        # Per-rank seed so each rank gets a distinct input batch.
        torch.manual_seed(self.rank + 1)
        inp = torch.randn((2, 16), device=device_type.type)
        for _ in range(3):
            ref_optim.zero_grad()
            ref_loss = ref_model(inp).sum()
            ref_loss.backward()
            for param in ref_model.parameters():
                # Average gradients across ranks to mimic FSDP's reduction.
                dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
            ref_optim.step()
            optim.zero_grad()
            loss = model(inp).sum()
            loss.backward()
            optim.step()
            self.assertEqual(ref_loss, loss)
    def _init_transformer(
        self,
        n_layers: int,
        reshard_after_forward: Union[bool, int, None],
        checkpoint_impl: Optional[str],
    ):
        """Build a fully sharded ``Transformer``, an Adam optimizer for it,
        and a random token batch.

        ``checkpoint_impl`` selects activation checkpointing: ``"utils"``
        enables the model's built-in checkpointing flag, ``"composable"``
        wraps each ``TransformerBlock`` with ``checkpoint``; any other value
        disables it.

        Returns:
            Tuple of ``(model, optim, inp)``.
        """
        model_args = ModelArgs(
            n_layers=n_layers, checkpoint_activations=(checkpoint_impl == "utils")
        )
        model = Transformer(model_args)
        for module in model.modules():
            if isinstance(module, TransformerBlock):
                if checkpoint_impl == "composable":
                    checkpoint(module)
                # Shard each transformer block individually.
                fully_shard(module, reshard_after_forward=reshard_after_forward)
        fully_shard(model, reshard_after_forward=reshard_after_forward)
        optim = torch.optim.Adam(model.parameters(), lr=1e-2)
        inp = torch.randint(
            0,
            model_args.vocab_size,
            (2, model_args.max_seq_len),
            device=device_type.type,
        )
        return model, optim, inp
def _get_unshard_with_record(
self, orig_unshard: Callable, events: list[EventType]
) -> Callable:
def unshard_with_record(self, *args, **kwargs):
nonlocal events
if (
self._all_gather_result is None
and self._sharded_state != ShardedState.UNSHARDED
): # skip no-ops
events.append(("unshard", self._module_fqn, self._training_state))
return orig_unshard(self, *args, **kwargs)
return unshard_with_record
def _get_reshard_with_record(
self, orig_reshard: Callable, events: list[EventType]
) -> Callable:
def reshard_with_record(self, *args, **kwargs):
nonlocal events
if (
self._training_state == TrainingState.FORWARD
and not self._reshard_after_forward
): # skip no-ops
return
events.append(("reshard", self._module_fqn, self._training_state))
return orig_reshard(self, *args, **kwargs)
return reshard_with_record
def _get_post_backward_with_record(
self, orig_post_backward: Callable, events: list[EventType]
) -> Callable:
def post_backward_with_record(self, *args, **kwargs):
nonlocal events
ret = orig_post_backward(self, *args, **kwargs)
# Use training state after running post-backward to check that the
# state is transitioned to `POST_BACKWARD` as expected
events.append(("post_backward", self._module_fqn, self._training_state))
return ret
return post_backward_with_record
| TestFullyShardPrefetch |
python | PrefectHQ__prefect | tests/events/server/test_in_memory_ordering.py | {
"start": 12114,
"end": 15394
class ____:
    """Tests for in-memory causal ordering of received events.

    NOTE(review): the class name is masked as ``____`` in this extract.
    """

    async def test_ordering_is_correct(
        self,
        causal_ordering: CausalOrdering,
        in_proper_order: Sequence[ReceivedEvent],
        example: Sequence[ReceivedEvent],
    ):
        # Events fed in an arbitrary order must be processed in causal order.
        processed: list[ReceivedEvent] = []

        async def evaluate(event: ReceivedEvent, depth: int = 0) -> None:
            async with causal_ordering.preceding_event_confirmed(
                evaluate, event, depth=depth
            ):
                processed.append(event)

        example = list(example)
        while example:
            try:
                await evaluate(example.pop(0))
            except EventArrivedEarly:
                # Leader not yet seen; the ordering machinery replays it later.
                continue

        assert processed == list(in_proper_order)

    async def test_wait_for_leader_no_follows(
        self, causal_ordering: CausalOrdering, event_one: ReceivedEvent
    ):
        # Event without follows should not wait
        await causal_ordering.wait_for_leader(event_one)  # Should not raise

    async def test_wait_for_leader_self_follows(
        self, causal_ordering: CausalOrdering, event_one: ReceivedEvent
    ):
        # Event that follows itself should not wait
        event_one.follows = event_one.id
        await causal_ordering.wait_for_leader(event_one)  # Should not raise

    async def test_wait_for_leader_old_event(
        self,
        causal_ordering: CausalOrdering,
        event_one: ReceivedEvent,
        event_two: ReceivedEvent,
    ):
        # Old events should not wait - patch datetime.now to make event appear old
        with patch("prefect.types._datetime.now") as mock_now:
            future_time = (
                event_two.received + PRECEDING_EVENT_LOOKBACK + timedelta(seconds=1)
            )
            mock_now.return_value = future_time
            await causal_ordering.wait_for_leader(event_two)  # Should not raise

    async def test_wait_for_leader_seen(
        self,
        causal_ordering: CausalOrdering,
        event_one: ReceivedEvent,
        event_two: ReceivedEvent,
    ):
        # Mark leader as seen
        await causal_ordering.record_event_as_seen(event_one)

        # Should not wait
        await causal_ordering.wait_for_leader(event_two)  # Should not raise

    async def test_wait_for_leader_in_flight(
        self,
        causal_ordering: CausalOrdering,
        event_one: ReceivedEvent,
        event_two: ReceivedEvent,
    ):
        # Mark leader as processing
        await causal_ordering.record_event_as_processing(event_one)

        # Start a task that will mark the leader as seen after a short delay
        async def mark_seen_later():
            await asyncio.sleep(0.1)
            await causal_ordering.record_event_as_seen(event_one)

        asyncio.create_task(mark_seen_later())

        # Should wait and then proceed
        await causal_ordering.wait_for_leader(event_two)  # Should not raise

    async def test_wait_for_leader_arrives_early(
        self, causal_ordering: CausalOrdering, event_two: ReceivedEvent
    ):
        # Leader not seen or processing - should raise EventArrivedEarly
        with pytest.raises(EventArrivedEarly) as exc_info:
            await causal_ordering.wait_for_leader(event_two)
        assert exc_info.value.event == event_two
| TestCausalOrderingFlow |
python | getsentry__sentry | tests/sentry/api/endpoints/test_project_overview.py | {
"start": 143,
"end": 2242
class ____(APITestCase):
    """Access-control tests for the project overview endpoint.

    NOTE(review): the class name is masked as ``____`` in this extract.
    """

    endpoint = "sentry-api-0-project-overview"

    def setUp(self) -> None:
        super().setUp()
        self.login_as(user=self.user)

    def test_simple(self) -> None:
        # Happy path: project owner can fetch the overview.
        response = self.get_success_response(self.project.organization.slug, self.project.slug)
        assert response.data["id"] == str(self.project.id)

    def test_cross_org_403(self) -> None:
        # A member of one org must not read a project of another org.
        org = self.create_organization()
        team = self.create_team(organization=org, name="foo", slug="foo")
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=org, role="member", teams=[team])

        other_org = self.create_organization()
        other_project = self.create_project(organization=other_org)

        self.login_as(user=user)
        self.get_error_response(other_org.slug, other_project.slug, status_code=403)

    def test_superuser_simple(self) -> None:
        superuser = self.create_user(is_superuser=True)
        self.login_as(user=superuser, superuser=True)
        response = self.get_success_response(self.project.organization.slug, self.project.slug)
        assert response.data["id"] == str(self.project.id)

    def test_staff_simple(self) -> None:
        staff_user = self.create_user(is_staff=True)
        self.login_as(user=staff_user, staff=True)
        response = self.get_success_response(self.project.organization.slug, self.project.slug)
        assert response.data["id"] == str(self.project.id)

    def test_non_org_rename_403(self) -> None:
        # A redirect created by a slug rename must not leak across orgs.
        org = self.create_organization()
        team = self.create_team(organization=org, name="foo", slug="foo")
        user = self.create_user(is_superuser=False)
        self.create_member(user=user, organization=org, role="member", teams=[team])

        other_org = self.create_organization()
        other_project = self.create_project(organization=other_org)
        ProjectRedirect.record(other_project, "old_slug")
        self.login_as(user=user)

        self.get_error_response(other_org.slug, "old_slug", status_code=403)
| ProjectOverviewTest |
python | django__django | tests/migrations/test_multidb.py | {
"start": 532,
"end": 698
class ____:
    """
    A database router that permits every migration on every database.
    """

    def allow_migrate(self, db, app_label, **hints):
        """Always answer "yes", regardless of ``db``, ``app_label`` or hints."""
        del db, app_label, hints  # the decision is unconditional
        return True
| MigrateEverythingRouter |
python | getsentry__sentry | tests/sentry/workflow_engine/handlers/detector/test_stateful.py | {
"start": 5992,
"end": 18498
class ____(TestCase):
    """Evaluation tests for a threshold-based stateful detector handler.

    NOTE(review): the class name is masked as ``____`` in this extract.
    """

    def setUp(self) -> None:
        # ``None`` group key addresses the detector-wide (ungrouped) state.
        self.group_key: DetectorGroupKey = None
        self.detector = self.create_detector(
            name="Stateful Detector",
            project=self.project,
        )
        self.detector.workflow_condition_group = self.create_data_condition_group()

        def add_condition(
            val: str | int,
            result: DetectorPriorityLevel,
            condition_type: str = "eq",
        ) -> None:
            self.create_data_condition(
                type=condition_type,
                comparison=val,
                condition_group=self.detector.workflow_condition_group,
                condition_result=result,
            )

        # Setup conditions for each priority level
        add_condition(val="OK", result=Level.OK)
        add_condition(val="LOW", result=Level.LOW)
        add_condition(val="MEDIUM", result=Level.MEDIUM)
        add_condition(val="HIGH", result=Level.HIGH)

        # Each non-OK level requires two consecutive matching evaluations.
        self.handler = MockDetectorStateHandler(
            detector=self.detector,
            thresholds={
                Level.LOW: 2,
                Level.MEDIUM: 2,
                Level.HIGH: 2,
            },
        )

    def packet(self, key: int, result: DataConditionResult | str) -> DataPacket:
        """
        Constructs a test data packet that will evaluate to the
        DetectorPriorityLevel specified for the result parameter.

        See the `add_condition` to understand the priority level -> group value
        mappings.
        """
        value = result
        if isinstance(result, DetectorPriorityLevel):
            value = result.name

        packet = {
            "id": str(key),
            "dedupe": key,
            "group_vals": {self.group_key: value},
        }
        return DataPacket(source_id=str(key), packet=packet)

    # NOTE(review): "evaualte" below is a typo for "evaluate"; consider renaming.
    def test_evaualte__under_threshold(self) -> None:
        # First evaluation does not trigger the threshold
        result = self.handler.evaluate(self.packet(1, Level.HIGH))
        assert result == {}

    def test_evaluate__override_threshold__triggered(self) -> None:
        # First evaluation does not trigger the threshold
        self.handler.evaluate(self.packet(1, Level.HIGH))

        # Second evaluation surpasses threshold and triggers
        result = self.handler.evaluate(self.packet(2, Level.HIGH))
        assert result

        evaluation_result = result[self.group_key]
        assert evaluation_result
        assert evaluation_result.priority == Level.HIGH
        assert isinstance(evaluation_result.result, IssueOccurrence)

        evidence_data = evaluation_result.result.evidence_data
        assert evidence_data["detector_id"] == self.detector.id

    def test_evaluate__detector_state(self) -> None:
        # Two evaluations triggers threshold
        self.handler.evaluate(self.packet(1, Level.HIGH))
        self.handler.evaluate(self.packet(2, Level.HIGH))

        state_data = self.handler.state_manager.get_state_data([self.group_key])[self.group_key]
        assert state_data.is_triggered is True
        assert state_data.status == Level.HIGH

        # Only has configured states
        assert state_data.counter_updates == {
            Level.HIGH: 2,
            Level.MEDIUM: 2,
            Level.LOW: 2,
            Level.OK: None,
        }

    def test_evaluate__detector_state__all_levels(self) -> None:
        # A single HIGH evaluation should increment all levels
        self.handler.evaluate(self.packet(1, Level.HIGH))
        state_data = self.handler.state_manager.get_state_data([self.group_key])[self.group_key]

        # Verify all the levels are present now
        assert state_data.counter_updates == {
            **{level: 1 for level in Level},
            Level.OK: None,
        }

    def test_evaluate__resolves(self) -> None:
        # Two HIGH evaluations will trigger
        result = self.handler.evaluate(self.packet(1, Level.HIGH))
        result = self.handler.evaluate(self.packet(2, Level.HIGH))
        assert result.get(self.group_key)
        assert isinstance(result[self.group_key].result, IssueOccurrence)

        # Resolves after a OK packet
        result = self.handler.evaluate(self.packet(3, Level.OK))
        assert result.get(self.group_key)

        evaluation_result = result[self.group_key]
        assert isinstance(evaluation_result.result, StatusChangeMessage)
        assert evaluation_result.priority == Level.OK
        assert evaluation_result.result.detector_id == self.detector.id

    def test_evaluate__high_to_low(self) -> None:
        # One HIGH then one LOW will result in a low evaluation
        result = self.handler.evaluate(self.packet(1, Level.HIGH))
        assert result == {}

        result = self.handler.evaluate(self.packet(2, Level.LOW))
        assert result.get(self.group_key)

        evaluation_result = result[self.group_key]
        assert isinstance(evaluation_result.result, IssueOccurrence)
        assert evaluation_result.priority == Level.LOW

    def test_evaluate__low_to_high(self) -> None:
        # Two LOW evaluations result in a LOW
        result = self.handler.evaluate(self.packet(1, Level.LOW))
        result = self.handler.evaluate(self.packet(2, Level.LOW))
        assert result.get(self.group_key)

        evaluation_result = result[self.group_key]
        assert isinstance(evaluation_result.result, IssueOccurrence)
        assert evaluation_result.priority == Level.LOW

        # Followed by two HIGH evaluations to result in a high
        result = self.handler.evaluate(self.packet(3, Level.HIGH))
        assert result == {}

        result = self.handler.evaluate(self.packet(4, Level.HIGH))
        assert result.get(self.group_key)

        evaluation_result = result[self.group_key]
        assert isinstance(evaluation_result.result, IssueOccurrence)
        assert evaluation_result.priority == Level.HIGH

    def test_evaluate__resolve__detector_state(self) -> None:
        # Two HIGH evaluations will trigger
        self.handler.evaluate(self.packet(1, Level.HIGH))
        self.handler.evaluate(self.packet(2, Level.HIGH))

        # A final OK will resolve
        self.handler.evaluate(self.packet(3, Level.OK))

        state_data = self.handler.state_manager.get_state_data([self.group_key])[self.group_key]

        # Check that the state is reset
        assert state_data.is_triggered is False
        assert state_data.status == Level.OK

        # Only has configured states
        assert state_data.counter_updates == {
            **{level: None for level in self.handler._thresholds},
        }

    def test_evaluate__trigger_after_resolve(self) -> None:
        # Two HIGH evaluations will trigger
        self.handler.evaluate(self.packet(1, Level.HIGH))
        self.handler.evaluate(self.packet(2, Level.HIGH))

        # A final OK will resolve
        self.handler.evaluate(self.packet(3, Level.OK))

        # Evaluate again, but under threshold so no trigger
        result = self.handler.evaluate(self.packet(4, Level.HIGH))
        assert result == {}

        # Evaluate again and cause a trigger
        result = self.handler.evaluate(self.packet(5, Level.HIGH))
        assert result

        evaluation_result = result[self.group_key]
        assert evaluation_result
        assert evaluation_result.priority == Level.HIGH
        assert isinstance(evaluation_result.result, IssueOccurrence)

    def test_evaluate__trigger_after_resolve__detector_state(self) -> None:
        # Two HIGH evaluations will trigger
        self.handler.evaluate(self.packet(1, Level.HIGH))
        self.handler.evaluate(self.packet(2, Level.HIGH))

        # A final OK will resolve
        self.handler.evaluate(self.packet(3, Level.OK))

        # Evaluate again, but under threshold so no trigger
        self.handler.evaluate(self.packet(4, Level.HIGH))
        state_data = self.handler.state_manager.get_state_data([self.group_key])[self.group_key]
        assert self.handler._thresholds[Level.HIGH] == 2
        assert state_data.is_triggered is False

        # Evaluate again and cause a trigger
        self.handler.evaluate(self.packet(5, Level.HIGH))
        state_data = self.handler.state_manager.get_state_data([self.group_key])[self.group_key]
        assert state_data.is_triggered is True
        assert state_data.status == Level.HIGH

    def test_evaluate__ok_resets_counters(self) -> None:
        # This should NOT trigger for HIGH since there's an OK in-between
        result = self.handler.evaluate(self.packet(1, Level.HIGH))
        result = self.handler.evaluate(self.packet(2, Level.OK))
        result = self.handler.evaluate(self.packet(3, Level.HIGH))
        assert result == {}

    def test_evaluate__low_threshold_larger_than_high(self) -> None:
        """
        Test that a LOW threshold that is larger than the HIGH threshold does
        not trigger once the HIGH threshold has already triggered.
        """
        test_handler = MockDetectorStateHandler(
            detector=self.detector,
            thresholds={
                Level.LOW: 3,
                Level.MEDIUM: 2,
                Level.HIGH: 2,
            },
        )

        # First two trigger a high result
        result = test_handler.evaluate(self.packet(1, Level.HIGH))
        result = test_handler.evaluate(self.packet(2, Level.HIGH))
        state_data = test_handler.state_manager.get_state_data([self.group_key])[self.group_key]
        assert state_data.is_triggered is True
        assert state_data.status == Level.HIGH

        # Third evaluation does NOT trigger another result
        result = test_handler.evaluate(self.packet(3, Level.HIGH))
        assert result == {}

        # Three LOW results trigger low evaluation
        result = test_handler.evaluate(self.packet(4, Level.LOW))
        assert result == {}
        result = test_handler.evaluate(self.packet(5, Level.LOW))
        assert result == {}
        result = test_handler.evaluate(self.packet(6, Level.LOW))
        state_data = test_handler.state_manager.get_state_data([self.group_key])[self.group_key]
        assert state_data.is_triggered is True
        assert state_data.status == Level.LOW

    def test_evaluate__counter_reset_for_non_none_group_key(self) -> None:
        self.group_key = "group1"

        # Trigger HIGH priority
        result = self.handler.evaluate(self.packet(1, Level.HIGH))
        assert result == {}
        result = self.handler.evaluate(self.packet(2, Level.HIGH))
        assert result[self.group_key].priority == Level.HIGH

        # Evaluate again at HIGH priority (same as current state)
        result = self.handler.evaluate(self.packet(3, Level.HIGH))
        assert result == {}

        # Evaluate at MEDIUM priority - should require 2 evaluations to trigger
        result = self.handler.evaluate(self.packet(4, Level.MEDIUM))
        assert result == {}
        result = self.handler.evaluate(self.packet(5, Level.MEDIUM))
        assert result[self.group_key].priority == Level.MEDIUM

    def test_evaluate__condition_hole(self):
        # Conditions cover <=5 (OK) and >10 (HIGH) only; values in (5, 10]
        # match nothing and must leave state unchanged.
        detector = self.create_detector(
            name="Stateful Detector",
            project=self.project,
        )
        detector.workflow_condition_group = self.create_data_condition_group(logic_type="any")
        self.create_data_condition(
            condition_group=detector.workflow_condition_group,
            comparison=5,
            type="lte",
            condition_result=Level.OK,
        )
        self.create_data_condition(
            condition_group=detector.workflow_condition_group,
            comparison=10,
            type="gt",
            condition_result=Level.HIGH,
        )
        handler = MockDetectorStateHandler(
            detector=detector, thresholds={Level.OK: 1, Level.HIGH: 1}
        )

        critical_packet = self.packet(1, 15)
        critical_result = handler.evaluate(critical_packet)
        assert critical_result[self.group_key].priority == Level.HIGH

        missing_condition_packet = self.packet(2, 8)
        missing_condition_result = handler.evaluate(missing_condition_packet)
        # We shouldn't change state, because there wasn't a matching condition
        assert missing_condition_result == {}

        resolution_packet = self.packet(3, 2)
        resolution_result = handler.evaluate(resolution_packet)
        assert resolution_result[self.group_key].priority == Level.OK
| TestStatefulDetectorHandlerEvaluate |
python | scipy__scipy | scipy/optimize/tests/test_least_squares.py | {
"start": 14325,
"end": 18951
class ____:
    """Mixin of bound-constrained ``least_squares`` tests.

    NOTE(review): the class name is masked as ``____`` in this extract. It is
    a mixin: concrete test classes must provide ``self.method``.
    """

    def test_inconsistent(self):
        # lb > ub must be rejected.
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      bounds=(10.0, 0.0), method=self.method)

    def test_infeasible(self):
        # x0 outside the feasible region must be rejected.
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      bounds=(3., 4), method=self.method)

    def test_wrong_number(self):
        # bounds must be a 2-tuple, not 3 values.
        assert_raises(ValueError, least_squares, fun_trivial, 2.,
                      bounds=(1., 2, 3), method=self.method)

    def test_inconsistent_shape(self):
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      bounds=(1.0, [2.0, 3.0]), method=self.method)
        # 1-D array won't be broadcast
        assert_raises(ValueError, least_squares, fun_rosenbrock, [1.0, 2.0],
                      bounds=([0.0], [3.0, 4.0]), method=self.method)

    def test_in_bounds(self):
        for jac in ['2-point', '3-point', 'cs', jac_trivial]:
            # Unconstrained minimum lies inside: active_mask stays 0.
            res = least_squares(fun_trivial, 2.0, jac=jac,
                                bounds=(-1.0, 3.0), method=self.method)
            assert_allclose(res.x, 0.0, atol=1e-4)
            assert_equal(res.active_mask, [0])
            assert_(-1 <= res.x <= 3)
            # Lower bound active: solution sits at lb, active_mask -1.
            res = least_squares(fun_trivial, 2.0, jac=jac,
                                bounds=(0.5, 3.0), method=self.method)
            assert_allclose(res.x, 0.5, atol=1e-4)
            assert_equal(res.active_mask, [-1])
            assert_(0.5 <= res.x <= 3)

    def test_bounds_shape(self):
        def get_bounds_direct(lb, ub):
            return lb, ub

        def get_bounds_instances(lb, ub):
            return Bounds(lb, ub)

        for jac in ['2-point', '3-point', 'cs', jac_2d_trivial]:
            for bounds_func in [get_bounds_direct, get_bounds_instances]:
                x0 = [1.0, 1.0]
                res = least_squares(fun_2d_trivial, x0, jac=jac)
                assert_allclose(res.x, [0.0, 0.0])
                # Scalar lb broadcast against vector ub, and vice versa.
                res = least_squares(fun_2d_trivial, x0, jac=jac,
                                    bounds=bounds_func(0.5, [2.0, 2.0]),
                                    method=self.method)
                assert_allclose(res.x, [0.5, 0.5])
                res = least_squares(fun_2d_trivial, x0, jac=jac,
                                    bounds=bounds_func([0.3, 0.2], 3.0),
                                    method=self.method)
                assert_allclose(res.x, [0.3, 0.2])
                res = least_squares(
                    fun_2d_trivial, x0, jac=jac,
                    bounds=bounds_func([-1, 0.5], [1.0, 3.0]),
                    method=self.method)
                assert_allclose(res.x, [0.0, 0.5], atol=1e-5)

    def test_bounds_instances(self):
        # ``Bounds`` objects with defaulted/partial lb/ub are accepted.
        res = least_squares(fun_trivial, 0.5, bounds=Bounds())
        assert_allclose(res.x, 0.0, atol=1e-4)
        res = least_squares(fun_trivial, 3.0, bounds=Bounds(lb=1.0))
        assert_allclose(res.x, 1.0, atol=1e-4)
        res = least_squares(fun_trivial, 0.5, bounds=Bounds(lb=-1.0, ub=1.0))
        assert_allclose(res.x, 0.0, atol=1e-4)
        res = least_squares(fun_trivial, -3.0, bounds=Bounds(ub=-1.0))
        assert_allclose(res.x, -1.0, atol=1e-4)
        res = least_squares(fun_2d_trivial, [0.5, 0.5],
                            bounds=Bounds(lb=[-1.0, -1.0], ub=1.0))
        assert_allclose(res.x, [0.0, 0.0], atol=1e-5)
        res = least_squares(fun_2d_trivial, [0.5, 0.5],
                            bounds=Bounds(lb=[0.1, 0.1]))
        assert_allclose(res.x, [0.1, 0.1], atol=1e-5)

    @pytest.mark.fail_slow(10)
    def test_rosenbrock_bounds(self):
        x0_1 = np.array([-2.0, 1.0])
        x0_2 = np.array([2.0, 2.0])
        x0_3 = np.array([-2.0, 2.0])
        x0_4 = np.array([0.0, 2.0])
        x0_5 = np.array([-1.2, 1.0])
        problems = [
            (x0_1, ([-np.inf, -1.5], np.inf)),
            (x0_2, ([-np.inf, 1.5], np.inf)),
            (x0_3, ([-np.inf, 1.5], np.inf)),
            (x0_4, ([-np.inf, 1.5], [1.0, np.inf])),
            (x0_2, ([1.0, 1.5], [3.0, 3.0])),
            (x0_5, ([-50.0, 0.0], [0.5, 100]))
        ]
        for x0, bounds in problems:
            # Sweep jacobian scheme x variable scaling x trust-region solver.
            for jac, x_scale, tr_solver in product(
                    ['2-point', '3-point', 'cs', jac_rosenbrock],
                    [1.0, [1.0, 0.5], 'jac'],
                    ['exact', 'lsmr']):
                res = least_squares(fun_rosenbrock, x0, jac, bounds,
                                    x_scale=x_scale, tr_solver=tr_solver,
                                    method=self.method)
                assert_allclose(res.optimality, 0.0, atol=1e-5)
| BoundsMixin |
python | astropy__astropy | astropy/io/ascii/core.py | {
"start": 46075,
"end": 56049
class ____(metaclass=MetaBaseReader):
    """Class providing methods to read and write an ASCII table using the specified
    header, data, inputter, and outputter instances.

    Typical usage is to instantiate a Reader() object and customize the
    ``header``, ``data``, ``inputter``, and ``outputter`` attributes. Each
    of these is an object of the corresponding class.

    There is one method ``inconsistent_handler`` that can be used to customize the
    behavior of ``read()`` in the event that a data row doesn't match the header.
    The default behavior is to raise an InconsistentTableError.

    NOTE(review): the class name is masked as ``____`` in this extract.
    """

    # Column selection/validation knobs; set by the read/write entry points.
    names = None
    include_names = None
    exclude_names = None
    strict_names = False
    # True while this reader runs as part of format guessing.
    guessing = False
    encoding = None

    # Component classes; subclasses override these to customize parsing.
    header_class = BaseHeader
    data_class = BaseData
    inputter_class = BaseInputter
    outputter_class = TableOutputter

    # Max column dimension that writer supports for this format. Exceptions
    # include ECSV (no limit) and HTML (max_ndim=2).
    max_ndim: ClassVar[int | None] = 1

    def __init__(self):
        self.header = self.header_class()
        self.data = self.data_class()
        self.inputter = self.inputter_class()
        self.outputter = self.outputter_class()
        # Data and Header instances benefit from a little cross-coupling. Header may need to
        # know about number of data columns for auto-column name generation and Data may
        # need to know about header (e.g. for fixed-width tables where widths are spec'd in header.
        self.data.header = self.header
        self.header.data = self.data

        # Metadata, consisting of table-level meta and column-level meta. The latter
        # could include information about column type, description, formatting, etc,
        # depending on the table meta format.
        self.meta = {"table": {}, "cols": {}}

    def _check_multidim_table(self, table: Table) -> None:
        """Check that the dimensions of columns in ``table`` are acceptable.

        The reader class attribute ``max_ndim`` defines the maximum dimension of
        columns that can be written using this format. The base value is ``1``,
        corresponding to normal scalar columns with just a length.

        Parameters
        ----------
        table : `~astropy.table.Table`
            Input table.

        Raises
        ------
        ValueError
            If any column exceeds the number of allowed dimensions
        """
        _check_multidim_table(table, self.max_ndim)

    def read(self, table):
        """Read the ``table`` and return the results in a format determined by
        the ``outputter`` attribute.

        The ``table`` parameter is any string or object that can be processed
        by the instance ``inputter``. For the base Inputter class ``table`` can be
        one of:

        * File name
        * File-like object
        * String (newline separated) with all header and data lines (must have at least 2 lines)
        * List of strings

        Parameters
        ----------
        table : str, file-like, list
            Input table.

        Returns
        -------
        table : `~astropy.table.Table`
            Output table

        """
        # If ``table`` is a file then store the name in the ``data``
        # attribute. The ``table`` is a "file" if it is a string
        # without the new line specific to the OS.
        with suppress(TypeError):
            # Strings only
            if os.linesep not in table + "":
                self.data.table_name = Path(table).name

        # If one of the newline chars is set as field delimiter, only
        # accept the other one as line splitter
        if self.header.splitter.delimiter == "\n":
            newline = "\r"
        elif self.header.splitter.delimiter == "\r":
            newline = "\n"
        else:
            newline = None

        # Get a list of the lines (rows) in the table
        self.lines = self.inputter.get_lines(table, newline=newline)

        # Set self.data.data_lines to a slice of lines contain the data rows
        self.data.get_data_lines(self.lines)

        # Extract table meta values (e.g. keywords, comments, etc). Updates self.meta.
        self.header.update_meta(self.lines, self.meta)

        # Get the table column definitions
        self.header.get_cols(self.lines)

        # Make sure columns are valid
        self.header.check_column_names(self.names, self.strict_names, self.guessing)

        self.cols = cols = self.header.cols
        self.data.splitter.cols = cols
        n_cols = len(cols)

        for i, str_vals in enumerate(self.data.get_str_vals()):
            if len(str_vals) != n_cols:
                str_vals = self.inconsistent_handler(str_vals, n_cols)

                # if str_vals is None, we skip this row
                if str_vals is None:
                    continue

                # otherwise, we raise an error only if it is still inconsistent
                if len(str_vals) != n_cols:
                    errmsg = (
                        f"Number of header columns ({n_cols}) inconsistent with "
                        f"data columns ({len(str_vals)}) at data line {i}\n"
                        f"Header values: {[x.name for x in cols]}\n"
                        f"Data values: {str_vals}"
                    )
                    raise InconsistentTableError(errmsg)

            for j, col in enumerate(cols):
                col.str_vals.append(str_vals[j])

        if hasattr(self.header, "table_meta"):
            self.meta["table"].update(self.header.table_meta)

        _apply_include_exclude_names(
            self.header, self.names, self.include_names, self.exclude_names
        )

        self.data.masks(cols)
        table = self.outputter(self.header.cols, self.meta)
        self.cols = self.header.cols

        return table

    def inconsistent_handler(self, str_vals: list[str], ncols: int) -> list[str]:
        """
        Adjust or skip data entries if a row is inconsistent with the header.

        The default implementation does no adjustment, and hence will always trigger
        an exception in read() any time the number of data entries does not match
        the header.

        Note that this will *not* be called if the row already matches the header.

        Parameters
        ----------
        str_vals : list
            A list of value strings from the current row of the table.
        ncols : int
            The expected number of entries from the table header.

        Returns
        -------
        str_vals : list
            List of strings to be parsed into data entries in the output table. If
            the length of this list does not match ``ncols``, an exception will be
            raised in read(). Can also be None, in which case the row will be
            skipped.
        """
        # an empty list will always trigger an InconsistentTableError in read()
        return str_vals

    @property
    def comment_lines(self) -> list[str]:
        """Return lines in the table that match header.comment regexp."""
        if not hasattr(self, "lines"):
            raise ValueError(
                "Table must be read prior to accessing the header comment lines"
            )
        if self.header.comment:
            re_comment = re.compile(self.header.comment)
            comment_lines = [x for x in self.lines if re_comment.match(x)]
        else:
            comment_lines = []
        return comment_lines

    def update_table_data(self, table):
        """
        Update table columns in place if needed.

        This is a hook to allow updating the table columns after name
        filtering but before setting up to write the data. This is currently
        only used by ECSV and is otherwise just a pass-through.

        Parameters
        ----------
        table : `astropy.table.Table`
            Input table for writing

        Returns
        -------
        table : `astropy.table.Table`
            Output table for writing
        """
        return table

    def write_header(self, lines, meta):
        # Comments first, then the column-name header line(s).
        self.header.write_comments(lines, meta)
        self.header.write(lines)

    def write(self, table: Table) -> list[str]:
        """
        Write ``table`` as list of strings.

        Parameters
        ----------
        table : `~astropy.table.Table`
            Input table data.

        Returns
        -------
        lines : list
            List of strings corresponding to ASCII table

        """
        # Check column names before altering
        self.header.cols = list(table.columns.values())
        self.header.check_column_names(self.names, self.strict_names, False)

        # In-place update of columns in input ``table`` to reflect column
        # filtering. Note that ``table`` is guaranteed to be a copy of the
        # original user-supplied table.
        _apply_include_exclude_names(
            table, self.names, self.include_names, self.exclude_names
        )

        # This is a hook to allow updating the table columns after name
        # filtering but before setting up to write the data. This is currently
        # only used by ECSV and is otherwise just a pass-through.
        table = self.update_table_data(table)

        # Check that table column dimensions are supported by this format class.
        # Most formats support only 1-d columns, but some like ECSV support N-d.
        self._check_multidim_table(table)

        # Now use altered columns
        new_cols = list(table.columns.values())
        # link information about the columns to the writer object (i.e. self)
        self.header.cols = new_cols
        self.data.cols = new_cols
        self.header.table_meta = table.meta

        # Write header and data to lines list
        lines: list[str] = []
        self.write_header(lines, table.meta)
        self.data.write(lines)

        return lines
| BaseReader |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-bitbucket/llama_index/readers/bitbucket/base.py | {
"start": 200,
"end": 5204
} | class ____(BaseReader):
"""
Bitbucket reader.
Reads the content of files in Bitbucket repositories.
"""
def __init__(
self,
base_url: Optional[str] = None,
project_key: Optional[str] = None,
branch: Optional[str] = "refs/heads/develop",
repository: Optional[str] = None,
extensions_to_skip: Optional[List] = [],
) -> None:
"""Initialize with parameters."""
if os.getenv("BITBUCKET_USERNAME") is None:
raise ValueError("Could not find a Bitbucket username.")
if os.getenv("BITBUCKET_API_KEY") is None:
raise ValueError("Could not find a Bitbucket api key.")
if base_url is None:
raise ValueError("You must provide a base url for Bitbucket.")
if project_key is None:
raise ValueError("You must provide a project key for Bitbucket repository.")
self.base_url = base_url
self.project_key = project_key
self.branch = branch
self.extensions_to_skip = extensions_to_skip
self.repository = repository
def get_headers(self):
username = os.getenv("BITBUCKET_USERNAME")
api_token = os.getenv("BITBUCKET_API_KEY")
auth = base64.b64encode(f"{username}:{api_token}".encode()).decode()
return {"Authorization": f"Basic {auth}"}
def get_slugs(self) -> List:
"""
Get slugs of the specific project.
"""
slugs = []
if self.repository is None:
repos_url = (
f"{self.base_url}/rest/api/latest/projects/{self.project_key}/repos/"
)
headers = self.get_headers()
response = requests.get(repos_url, headers=headers)
if response.status_code == 200:
repositories = response.json()["values"]
for repo in repositories:
repo_slug = repo["slug"]
slugs.append(repo_slug)
slugs.append(self.repository)
return slugs
def load_all_file_paths(self, slug, branch, directory_path="", paths=[]):
"""
Go inside every file that is present in the repository and get the paths for each file.
"""
content_url = f"{self.base_url}/rest/api/latest/projects/{self.project_key}/repos/{slug}/browse/{directory_path}"
query_params = {
"at": branch,
}
headers = self.get_headers()
response = requests.get(content_url, headers=headers, params=query_params)
response = response.json()
if "errors" in response:
raise ValueError(response["errors"])
children = response["children"]
for value in children["values"]:
if value["type"] == "FILE":
if value["path"]["extension"] not in self.extensions_to_skip:
paths.append(
{
"slug": slug,
"path": f"{directory_path}/{value['path']['toString']}",
}
)
elif value["type"] == "DIRECTORY":
self.load_all_file_paths(
slug=slug,
branch=branch,
directory_path=f"{directory_path}/{value['path']['toString']}",
paths=paths,
)
def load_text_by_paths(self, slug, file_path, branch) -> List:
"""
Go inside every file that is present in the repository and get the paths for each file.
"""
content_url = f"{self.base_url}/rest/api/latest/projects/{self.project_key}/repos/{slug}/browse{file_path}"
query_params = {
"at": branch,
}
headers = self.get_headers()
response = requests.get(content_url, headers=headers, params=query_params)
children = response.json()
if "errors" in children:
raise ValueError(children["errors"])
if "lines" in children:
return children["lines"]
return []
def load_text(self, paths) -> List:
text_dict = []
for path in paths:
lines_list = self.load_text_by_paths(
slug=path["slug"], file_path=path["path"], branch=self.branch
)
concatenated_string = ""
for line_dict in lines_list:
text = line_dict.get("text", "")
concatenated_string = concatenated_string + " " + text
text_dict.append(concatenated_string)
return text_dict
def load_data(self) -> List[Document]:
"""Return a list of Document made of each file in Bitbucket."""
slugs = self.get_slugs()
paths = []
for slug in slugs:
self.load_all_file_paths(
slug=slug, branch=self.branch, directory_path="", paths=paths
)
texts = self.load_text(paths)
return [Document(text=text) for text in texts]
| BitbucketReader |
python | getsentry__sentry | tests/sentry/utils/test_exceptions.py | {
"start": 643,
"end": 12061
} | class ____:
def test_with_task_state_and_single_exception_mapping(self) -> None:
"""Test exception_grouping_context with a single exception type mapping."""
mock_task_state = CurrentTaskState(
id="test_id",
namespace="test_namespace",
taskname="test_task",
attempt=1,
processing_deadline_duration=30,
retries_remaining=True,
)
with patch(
"sentry.utils.exceptions.current_task",
return_value=mock_task_state,
):
with patch("sentry_sdk.new_scope") as mock_scope:
mock_scope_instance = Mock()
mock_scope.return_value.__enter__ = Mock(return_value=mock_scope_instance)
mock_scope.return_value.__exit__ = Mock(return_value=None)
captured_processor = None
def capture_processor(processor: Any) -> None:
nonlocal captured_processor
captured_processor = processor
mock_scope_instance.add_error_processor = capture_processor
exception_mapping: dict[type[BaseException], str] = {
CustomError: "custom.error.fingerprint"
}
with exception_grouping_context(exception_mapping, "refinement1", "refinement2"):
pass
assert captured_processor is not None
# Test with matching exception
event: Any = {}
exc = CustomError("Test error")
exc_info = (CustomError, exc, None)
result = captured_processor(event, exc_info)
assert result["fingerprint"] == [
"custom.error.fingerprint",
"test_namespace",
"test_task",
"refinement1",
"refinement2",
]
def test_with_task_state_and_multiple_exception_mappings(self) -> None:
"""Test exception_grouping_context with multiple exception type mappings."""
mock_task_state = CurrentTaskState(
id="test_id",
namespace="test_namespace",
taskname="test_task",
attempt=1,
processing_deadline_duration=30,
retries_remaining=True,
)
with patch(
"sentry.utils.exceptions.current_task",
return_value=mock_task_state,
):
with patch("sentry_sdk.new_scope") as mock_scope:
mock_scope_instance = Mock()
mock_scope.return_value.__enter__ = Mock(return_value=mock_scope_instance)
mock_scope.return_value.__exit__ = Mock(return_value=None)
captured_processor = None
def capture_processor(processor: Any) -> None:
nonlocal captured_processor
captured_processor = processor
mock_scope_instance.add_error_processor = capture_processor
exception_mapping = {
CustomError: "custom.error.fingerprint",
AnotherError: "another.error.fingerprint",
ProcessingDeadlineExceeded: "deadline.exceeded",
}
with exception_grouping_context(exception_mapping):
pass
assert captured_processor is not None
# Test first exception type
event1: Any = {}
exc1 = CustomError("Test error")
exc_info1 = (CustomError, exc1, None)
result1 = captured_processor(event1, exc_info1)
assert result1["fingerprint"] == [
"custom.error.fingerprint",
"test_namespace",
"test_task",
]
# Test second exception type
event2: Any = {}
exc2 = AnotherError("Another error")
exc_info2 = (AnotherError, exc2, None)
result2 = captured_processor(event2, exc_info2)
assert result2["fingerprint"] == [
"another.error.fingerprint",
"test_namespace",
"test_task",
]
# Test third exception type
event3: Any = {}
exc3 = ProcessingDeadlineExceeded("Deadline exceeded")
exc_info3 = (ProcessingDeadlineExceeded, exc3, None)
result3 = captured_processor(event3, exc_info3)
assert result3["fingerprint"] == [
"deadline.exceeded",
"test_namespace",
"test_task",
]
def test_with_task_state_and_non_mapped_exception(self) -> None:
"""Test that non-mapped exceptions don't get fingerprints modified."""
mock_task_state = CurrentTaskState(
id="test_id",
namespace="test_namespace",
taskname="test_task",
attempt=1,
processing_deadline_duration=30,
retries_remaining=True,
)
with patch(
"sentry.utils.exceptions.current_task",
return_value=mock_task_state,
):
with patch("sentry_sdk.new_scope") as mock_scope:
mock_scope_instance = Mock()
mock_scope.return_value.__enter__ = Mock(return_value=mock_scope_instance)
mock_scope.return_value.__exit__ = Mock(return_value=None)
captured_processor = None
def capture_processor(processor: Any) -> None:
nonlocal captured_processor
captured_processor = processor
mock_scope_instance.add_error_processor = capture_processor
exception_mapping: dict[type[BaseException], str] = {
CustomError: "custom.error.fingerprint"
}
with exception_grouping_context(exception_mapping):
pass
assert captured_processor is not None
# Test with unmapped exception
event = {"original": "data"}
exc = ValueError("Unmapped error")
exc_info = (ValueError, exc, None)
result = captured_processor(event, exc_info)
# Event should be unchanged
assert result == {"original": "data"}
assert "fingerprint" not in result
def test_without_task_state(self) -> None:
"""Test that the context works when no task state is available."""
with patch("sentry.utils.exceptions.current_task", return_value=None):
with patch("sentry.utils.exceptions.logger") as mock_logger:
exception_mapping: dict[type[BaseException], str] = {
CustomError: "custom.error.fingerprint"
}
with exception_grouping_context(exception_mapping):
pass
mock_logger.info.assert_called_once_with(
"No task state found in exception_grouping_context"
)
def test_context_manager_yields_correctly(self) -> None:
"""Test that the context manager yields correctly."""
executed = False
with patch("sentry.utils.exceptions.current_task", return_value=None):
exception_mapping: dict[type[BaseException], str] = {
CustomError: "custom.error.fingerprint"
}
with exception_grouping_context(exception_mapping):
executed = True
assert executed is True
def test_exception_inheritance(self) -> None:
"""Test that exception inheritance works correctly."""
mock_task_state = CurrentTaskState(
id="test_id",
namespace="test_namespace",
taskname="test_task",
attempt=1,
processing_deadline_duration=30,
retries_remaining=True,
)
class BaseError(Exception):
pass
class DerivedError(BaseError):
pass
with patch(
"sentry.utils.exceptions.current_task",
return_value=mock_task_state,
):
with patch("sentry_sdk.new_scope") as mock_scope:
mock_scope_instance = Mock()
mock_scope.return_value.__enter__ = Mock(return_value=mock_scope_instance)
mock_scope.return_value.__exit__ = Mock(return_value=None)
captured_processor = None
def capture_processor(processor: Any) -> None:
nonlocal captured_processor
captured_processor = processor
mock_scope_instance.add_error_processor = capture_processor
# Map the base error
exception_mapping: dict[type[BaseException], str] = {
BaseError: "base.error.fingerprint"
}
with exception_grouping_context(exception_mapping):
pass
assert captured_processor is not None
# Test with derived exception (should match base error mapping)
event: Any = {}
exc = DerivedError("Derived error")
exc_info = (DerivedError, exc, None)
result = captured_processor(event, exc_info)
assert result["fingerprint"] == [
"base.error.fingerprint",
"test_namespace",
"test_task",
]
def test_empty_exception_mapping(self) -> None:
"""Test that empty exception mapping works correctly."""
mock_task_state = CurrentTaskState(
id="test_id",
namespace="test_namespace",
taskname="test_task",
attempt=1,
processing_deadline_duration=30,
retries_remaining=True,
)
with patch(
"sentry.utils.exceptions.current_task",
return_value=mock_task_state,
):
with patch("sentry_sdk.new_scope") as mock_scope:
mock_scope_instance = Mock()
mock_scope.return_value.__enter__ = Mock(return_value=mock_scope_instance)
mock_scope.return_value.__exit__ = Mock(return_value=None)
captured_processor = None
def capture_processor(processor: Any) -> None:
nonlocal captured_processor
captured_processor = processor
mock_scope_instance.add_error_processor = capture_processor
# Empty mapping
exception_mapping: dict[type[BaseException], str] = {}
with exception_grouping_context(exception_mapping):
pass
assert captured_processor is not None
# Test with any exception
event = {"original": "data"}
exc = ValueError("Some error")
exc_info = (ValueError, exc, None)
result = captured_processor(event, exc_info)
# Event should be unchanged
assert result == {"original": "data"}
assert "fingerprint" not in result
| TestExceptionGroupingContext |
python | kamyu104__LeetCode-Solutions | Python/maximum-points-in-an-archery-competition.py | {
"start": 46,
"end": 1034
} | class ____(object):
def maximumBobPoints(self, numArrows, aliceArrows):
"""
:type numArrows: int
:type aliceArrows: List[int]
:rtype: List[int]
"""
def check(mask, numArrows):
score = 0
cnt = [0]*len(aliceArrows)
i, base = 0, 1
for k, a in enumerate(aliceArrows):
if mask&1:
need = a+1
if need > numArrows:
return 0, [0]*len(aliceArrows)
numArrows -= need
cnt[k] = need
score += k
mask >>= 1
cnt[-1] += numArrows
return score, cnt
result = [0]*len(aliceArrows)
best = 0
for mask in xrange(1, 2**len(aliceArrows)):
score, cnt = check(mask, numArrows)
if score > best:
best = score
result = cnt
return result
| Solution |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_internal/build_env.py | {
"start": 2625,
"end": 9757
} | class ____:
"""Creates and manages an isolated environment to install build deps"""
def __init__(self) -> None:
temp_dir = TempDirectory(kind=tempdir_kinds.BUILD_ENV, globally_managed=True)
self._prefixes = OrderedDict(
(name, _Prefix(os.path.join(temp_dir.path, name)))
for name in ("normal", "overlay")
)
self._bin_dirs: List[str] = []
self._lib_dirs: List[str] = []
for prefix in reversed(list(self._prefixes.values())):
self._bin_dirs.append(prefix.bin_dir)
self._lib_dirs.extend(prefix.lib_dirs)
# Customize site to:
# - ensure .pth files are honored
# - prevent access to system site packages
system_sites = _get_system_sitepackages()
self._site_dir = os.path.join(temp_dir.path, "site")
if not os.path.exists(self._site_dir):
os.mkdir(self._site_dir)
with open(
os.path.join(self._site_dir, "sitecustomize.py"), "w", encoding="utf-8"
) as fp:
fp.write(
textwrap.dedent(
"""
import os, site, sys
# First, drop system-sites related paths.
original_sys_path = sys.path[:]
known_paths = set()
for path in {system_sites!r}:
site.addsitedir(path, known_paths=known_paths)
system_paths = set(
os.path.normcase(path)
for path in sys.path[len(original_sys_path):]
)
original_sys_path = [
path for path in original_sys_path
if os.path.normcase(path) not in system_paths
]
sys.path = original_sys_path
# Second, add lib directories.
# ensuring .pth file are processed.
for path in {lib_dirs!r}:
assert not path in sys.path
site.addsitedir(path)
"""
).format(system_sites=system_sites, lib_dirs=self._lib_dirs)
)
def __enter__(self) -> None:
self._save_env = {
name: os.environ.get(name, None)
for name in ("PATH", "PYTHONNOUSERSITE", "PYTHONPATH")
}
path = self._bin_dirs[:]
old_path = self._save_env["PATH"]
if old_path:
path.extend(old_path.split(os.pathsep))
pythonpath = [self._site_dir]
os.environ.update(
{
"PATH": os.pathsep.join(path),
"PYTHONNOUSERSITE": "1",
"PYTHONPATH": os.pathsep.join(pythonpath),
}
)
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> None:
for varname, old_value in self._save_env.items():
if old_value is None:
os.environ.pop(varname, None)
else:
os.environ[varname] = old_value
def check_requirements(
self, reqs: Iterable[str]
) -> Tuple[Set[Tuple[str, str]], Set[str]]:
"""Return 2 sets:
- conflicting requirements: set of (installed, wanted) reqs tuples
- missing requirements: set of reqs
"""
missing = set()
conflicting = set()
if reqs:
env = (
get_environment(self._lib_dirs)
if hasattr(self, "_lib_dirs")
else get_default_environment()
)
for req_str in reqs:
req = get_requirement(req_str)
# We're explicitly evaluating with an empty extra value, since build
# environments are not provided any mechanism to select specific extras.
if req.marker is not None and not req.marker.evaluate({"extra": ""}):
continue
dist = env.get_distribution(req.name)
if not dist:
missing.add(req_str)
continue
if isinstance(dist.version, Version):
installed_req_str = f"{req.name}=={dist.version}"
else:
installed_req_str = f"{req.name}==={dist.version}"
if not req.specifier.contains(dist.version, prereleases=True):
conflicting.add((installed_req_str, req_str))
# FIXME: Consider direct URL?
return conflicting, missing
def install_requirements(
self,
finder: "PackageFinder",
requirements: Iterable[str],
prefix_as_string: str,
*,
kind: str,
) -> None:
prefix = self._prefixes[prefix_as_string]
assert not prefix.setup
prefix.setup = True
if not requirements:
return
self._install_requirements(
get_runnable_pip(),
finder,
requirements,
prefix,
kind=kind,
)
@staticmethod
def _install_requirements(
pip_runnable: str,
finder: "PackageFinder",
requirements: Iterable[str],
prefix: _Prefix,
*,
kind: str,
) -> None:
args: List[str] = [
sys.executable,
pip_runnable,
"install",
"--ignore-installed",
"--no-user",
"--prefix",
prefix.path,
"--no-warn-script-location",
"--disable-pip-version-check",
]
if logger.getEffectiveLevel() <= logging.DEBUG:
args.append("-vv")
elif logger.getEffectiveLevel() <= VERBOSE:
args.append("-v")
for format_control in ("no_binary", "only_binary"):
formats = getattr(finder.format_control, format_control)
args.extend(
(
"--" + format_control.replace("_", "-"),
",".join(sorted(formats or {":none:"})),
)
)
index_urls = finder.index_urls
if index_urls:
args.extend(["-i", index_urls[0]])
for extra_index in index_urls[1:]:
args.extend(["--extra-index-url", extra_index])
else:
args.append("--no-index")
for link in finder.find_links:
args.extend(["--find-links", link])
for host in finder.trusted_hosts:
args.extend(["--trusted-host", host])
if finder.allow_all_prereleases:
args.append("--pre")
if finder.prefer_binary:
args.append("--prefer-binary")
args.append("--")
args.extend(requirements)
extra_environ = {"_PIP_STANDALONE_CERT": where()}
with open_spinner(f"Installing {kind}") as spinner:
call_subprocess(
args,
command_desc=f"pip subprocess to install {kind}",
spinner=spinner,
extra_environ=extra_environ,
)
| BuildEnvironment |
python | jina-ai__jina | tests/integration/deployment_http_composite/test_deployment_http_composite.py | {
"start": 186,
"end": 7681
} | class ____(Executor):
def __init__(self, init_sleep_time=0, *args, **kwargs):
super().__init__(*args, **kwargs)
time.sleep(init_sleep_time)
@requests(on='/foo')
async def foo(self, docs, **kwargs):
for doc in docs:
doc.text += f'return foo {os.getpid()}'
doc.tags['pid'] = os.getpid()
@requests(on='/bar')
async def bar(self, docs, **kwargs):
for doc in docs:
doc.text += f'return bar {os.getpid()}'
doc.tags['pid'] = os.getpid()
@requests(on='/error')
async def raise_error(self, docs, **kwargs):
raise Exception('Raised exception in request')
@requests(on='/parameters')
async def return_parameters(self, docs, **kwargs):
return {'pid': os.getpid()}
@requests(on='/docsparams')
async def docs_with_params(self, docs, parameters, **kwargs):
for doc in docs:
doc.text = parameters['key']
@pytest.mark.parametrize('replicas', [1, 2, 3])
@pytest.mark.parametrize('include_gateway', [True, False])
@pytest.mark.parametrize('cors', [True, False])
@pytest.mark.parametrize('protocols', [['http'], ['grpc'], ['grpc', 'http']])
@pytest.mark.parametrize('init_sleep_time', [0, 0.5, 5])
@pytest.mark.skipif(docarray_v2, reason='tests support for docarray<0.30')
def test_slow_load_executor(
replicas, include_gateway, protocols, init_sleep_time, cors
):
if replicas > 1 and not include_gateway:
return
ports = [random_port() for _ in range(len(protocols))]
d = Deployment(
uses=SingleExecutorDeployment,
uses_with={'init_sleep_time': init_sleep_time},
replicas=replicas,
protocol=protocols,
port=ports,
include_gateway=include_gateway,
cors=cors,
)
with d:
for protocol, port in zip(protocols, ports):
c = Client(protocol=protocol, port=port)
res = c.post(on='/foo', inputs=DocumentArray.empty(10), request_size=1)
assert len(res) == 10
assert all(['foo' in doc.text for doc in res])
different_pids = set([doc.tags['pid'] for doc in res])
assert len(different_pids) == replicas
res = c.post(on='/bar', inputs=DocumentArray.empty(10), request_size=1)
assert len(res) == 10
assert all(['bar' in doc.text for doc in res])
different_pids = set([doc.tags['pid'] for doc in res])
assert len(different_pids) == replicas
@pytest.mark.parametrize('replicas', [1, 2, 3])
@pytest.mark.parametrize('include_gateway', [True, False])
@pytest.mark.parametrize('protocol', ['grpc', 'http'])
@pytest.mark.parametrize('init_sleep_time', [0, 0.5, 5])
@pytest.mark.skipif(docarray_v2, reason='tests support for docarray<0.30')
def test_post_from_deployment(replicas, include_gateway, protocol, init_sleep_time):
if replicas > 1 and not include_gateway:
return
d = Deployment(
uses=SingleExecutorDeployment,
uses_with={'init_sleep_time': init_sleep_time},
replicas=replicas,
protocol=protocol,
include_gateway=include_gateway,
)
with d:
res = d.post(on='/foo', inputs=DocumentArray.empty(10), request_size=1)
assert all(['foo' in doc.text for doc in res])
different_pids = set([doc.tags['pid'] for doc in res])
assert len(different_pids) == replicas
res = d.post(on='/bar', inputs=DocumentArray.empty(10), request_size=1)
assert len(res) == 10
assert all(['bar' in doc.text for doc in res])
different_pids = set([doc.tags['pid'] for doc in res])
assert len(different_pids) == replicas
@pytest.mark.parametrize('replicas', [1, 2, 3])
@pytest.mark.parametrize('include_gateway', [True, False])
@pytest.mark.parametrize('protocols', [['http'], ['grpc', 'http']])
@pytest.mark.skipif(docarray_v2, reason='tests support for docarray<0.30')
def test_base_executor(replicas, include_gateway, protocols):
if replicas > 1 and not include_gateway:
return
ports = [random_port() for _ in range(len(protocols))]
d = Deployment(
replicas=replicas,
protocol=protocols,
port=ports,
include_gateway=include_gateway,
)
with d:
for protocol, port in zip(protocols, ports):
c = Client(protocol=protocol, port=port)
res = c.post(on='/default', inputs=DocumentArray.empty(10), request_size=1)
assert len(res) == 10
@pytest.mark.parametrize('replicas', [1, 2, 3])
@pytest.mark.parametrize('include_gateway', [True, False])
@pytest.mark.parametrize('protocols', [['http'], ['grpc', 'http']])
@pytest.mark.parametrize('init_sleep_time', [0, 0.5, 5])
@pytest.mark.skipif(docarray_v2, reason='tests support for docarray<0.30')
def test_return_parameters(replicas, include_gateway, protocols, init_sleep_time):
if replicas > 1 and not include_gateway:
return
ports = [random_port() for _ in range(len(protocols))]
d = Deployment(
uses=SingleExecutorDeployment,
uses_with={'init_sleep_time': init_sleep_time},
replicas=replicas,
protocol=protocols,
port=ports,
include_gateway=include_gateway,
)
with d:
for protocol, port in zip(protocols, ports):
c = Client(protocol=protocol, port=port)
res = c.post(
on='/parameters',
inputs=DocumentArray.empty(10),
request_size=1,
return_responses=True,
)
assert len(res) == 10
assert all(
['__results__' in response.parameters.keys() for response in res]
)
different_pids = set(
[
list(response.parameters['__results__'].values())[0]['pid']
for response in res
]
)
assert len(different_pids) == replicas
res = c.post(
on='/docsparams',
inputs=DocumentArray.empty(10),
parameters={'key': 'value'},
request_size=1,
)
assert len(res) == 10
assert all([doc.text == 'value' for doc in res])
@pytest.mark.parametrize('replicas', [1, 2, 3])
@pytest.mark.parametrize('include_gateway', [True, False])
@pytest.mark.parametrize('protocols', [['http'], ['grpc', 'http']])
@pytest.mark.skipif(docarray_v2, reason='tests support for docarray<0.30')
def test_invalid_protocols_with_shards(replicas, include_gateway, protocols):
if replicas > 1 and not include_gateway:
return
with pytest.raises(RuntimeError):
d = Deployment(
replicas=replicas,
protocol=protocols,
include_gateway=include_gateway,
shards=2,
)
with d:
pass
@pytest.mark.parametrize('replicas', [1, 2, 3])
@pytest.mark.parametrize('include_gateway', [True, False])
@pytest.mark.parametrize('protocols', [['websocket'], ['grpc', 'websocket']])
@pytest.mark.skipif(docarray_v2, reason='tests support for docarray<0.30')
def test_invalid_websocket_protocol(replicas, include_gateway, protocols):
if replicas > 1 and not include_gateway:
return
with pytest.raises(RuntimeError):
d = Deployment(
replicas=replicas, protocol=protocols, include_gateway=include_gateway
)
with d:
pass
| SingleExecutorDeployment |
python | Textualize__textual | src/textual/events.py | {
"start": 19830,
"end": 20464
} | class ____(Event, bubble=True, verbose=True):
"""Sent when the mouse is moved over a widget.
Note that this event bubbles, so a widget may receive this event when the mouse
moves over a child widget. Check the `node` attribute for the widget directly under
the mouse.
- [X] Bubbles
- [X] Verbose
"""
__slots__ = ["node"]
def __init__(self, node: DOMNode) -> None:
self.node = node
"""The node directly under the mouse."""
super().__init__()
@property
def control(self) -> DOMNode:
"""Alias for the `node` under the mouse."""
return self.node
| Enter |
python | spack__spack | lib/spack/spack/test/llnl/util/lock.py | {
"start": 8818,
"end": 9319
} | class ____:
def __init__(self, lock_path, start=0, length=0):
self.lock_path = lock_path
self.start = start
self.length = length
@property
def __name__(self):
return self.__class__.__name__
def __call__(self, barrier):
lock = lk.Lock(self.lock_path, start=self.start, length=self.length)
lock.acquire_write() # grab exclusive lock
barrier.wait()
barrier.wait() # hold the lock until timeout in other procs.
| AcquireWrite |
python | tensorflow__tensorflow | tensorflow/python/util/function_utils_test.py | {
"start": 4330,
"end": 7353
} | class ____(test.TestCase):
def test_simple_function(self):
fn_has_kwargs = lambda **x: x
self.assertTrue(function_utils.has_kwargs(fn_has_kwargs))
fn_has_no_kwargs = lambda x: x
self.assertFalse(function_utils.has_kwargs(fn_has_no_kwargs))
def test_callable(self):
class FooHasKwargs(object):
def __call__(self, **x):
del x
self.assertTrue(function_utils.has_kwargs(FooHasKwargs()))
class FooHasNoKwargs(object):
def __call__(self, x):
del x
self.assertFalse(function_utils.has_kwargs(FooHasNoKwargs()))
def test_bound_method(self):
class FooHasKwargs(object):
def fn(self, **x):
del x
self.assertTrue(function_utils.has_kwargs(FooHasKwargs().fn))
class FooHasNoKwargs(object):
def fn(self, x):
del x
self.assertFalse(function_utils.has_kwargs(FooHasNoKwargs().fn))
def test_partial_function(self):
expected_test_arg = 123
def fn_has_kwargs(test_arg, **x):
if test_arg != expected_test_arg:
return ValueError('partial fn does not work correctly')
return x
wrapped_fn = functools.partial(fn_has_kwargs, test_arg=123)
self.assertTrue(function_utils.has_kwargs(wrapped_fn))
some_kwargs = dict(x=1, y=2, z=3)
self.assertEqual(wrapped_fn(**some_kwargs), some_kwargs)
def fn_has_no_kwargs(x, test_arg):
if test_arg != expected_test_arg:
return ValueError('partial fn does not work correctly')
return x
wrapped_fn = functools.partial(fn_has_no_kwargs, test_arg=123)
self.assertFalse(function_utils.has_kwargs(wrapped_fn))
some_arg = 1
self.assertEqual(wrapped_fn(some_arg), some_arg)
def test_double_partial(self):
expected_test_arg1 = 123
expected_test_arg2 = 456
def fn_has_kwargs(test_arg1, test_arg2, **x):
if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:
return ValueError('partial does not work correctly')
return x
wrapped_fn = functools.partial(fn_has_kwargs, test_arg2=456)
double_wrapped_fn = functools.partial(wrapped_fn, test_arg1=123)
self.assertTrue(function_utils.has_kwargs(double_wrapped_fn))
some_kwargs = dict(x=1, y=2, z=3)
self.assertEqual(double_wrapped_fn(**some_kwargs), some_kwargs)
def fn_has_no_kwargs(x, test_arg1, test_arg2):
if test_arg1 != expected_test_arg1 or test_arg2 != expected_test_arg2:
return ValueError('partial does not work correctly')
return x
wrapped_fn = functools.partial(fn_has_no_kwargs, test_arg2=456)
double_wrapped_fn = functools.partial(wrapped_fn, test_arg1=123)
self.assertFalse(function_utils.has_kwargs(double_wrapped_fn))
some_arg = 1
self.assertEqual(double_wrapped_fn(some_arg), some_arg) # pylint: disable=no-value-for-parameter
def test_raises_type_error(self):
with self.assertRaisesRegex(TypeError,
'should be a callable'):
function_utils.has_kwargs('not a function')
| HasKwargsTest |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/schemas.py | {
"start": 53127,
"end": 53214
} | class ____(Protocol):
def __call__(self, *args: FxValue) -> list[FxValue]: ...
| FlatFn |
python | gevent__gevent | src/greentest/3.10/test_asyncore.py | {
"start": 26182,
"end": 26346
} | class ____(TestAPI_UseIPv6Sockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, 'poll'), 'select.poll required')
| TestAPI_UseIPv6Select |
python | django__django | tests/queryset_pickle/models.py | {
"start": 251,
"end": 444
} | class ____(models.QuerySet):
def __getstate__(self):
state = super().__getstate__()
state[DJANGO_VERSION_PICKLE_KEY] = "1.0"
return state
| PreviousDjangoVersionQuerySet |
python | PyCQA__pylint | doc/data/messages/s/self-cls-assignment/bad.py | {
"start": 0,
"end": 233
} | class ____:
@classmethod
def list_fruits(cls):
cls = "apple" # [self-cls-assignment]
def print_color(self, *colors):
self = "red" # [self-cls-assignment]
color = colors[1]
print(color)
| Fruit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.