language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_external_user.py | {
"start": 49,
"end": 3690
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-external-user"
method = "post"
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
self.org_slug = self.organization.slug # force creation
self.integration, _ = self.create_provider_integration_for(
self.organization, self.user, provider="github", name="GitHub", external_id="github:1"
)
self.data = {
"externalName": "@NisanthanNanthakumar",
"provider": "github",
"userId": self.user.id,
"integrationId": self.integration.id,
}
def test_basic_post(self) -> None:
with self.feature({"organizations:integrations-codeowners": True}):
response = self.get_success_response(self.org_slug, status_code=201, **self.data)
assert response.data == {
**self.data,
"id": str(response.data["id"]),
"userId": str(self.user.id),
"integrationId": str(self.integration.id),
}
def test_without_feature_flag(self) -> None:
with self.feature({"organizations:integrations-codeowners": False}):
response = self.get_error_response(self.org_slug, status_code=403, **self.data)
assert response.data == {"detail": "You do not have permission to perform this action."}
def test_missing_provider(self) -> None:
self.data.pop("provider")
with self.feature({"organizations:integrations-codeowners": True}):
response = self.get_error_response(self.org_slug, status_code=400, **self.data)
assert response.data == {"provider": ["This field is required."]}
def test_missing_externalName(self) -> None:
self.data.pop("externalName")
with self.feature({"organizations:integrations-codeowners": True}):
response = self.get_error_response(self.org_slug, status_code=400, **self.data)
assert response.data == {"externalName": ["This field is required."]}
def test_missing_userId(self) -> None:
self.data.pop("userId")
with self.feature({"organizations:integrations-codeowners": True}):
response = self.get_error_response(self.org_slug, status_code=400, **self.data)
assert response.data == {"userId": ["This field is required."]}
def test_missing_integrationId(self) -> None:
self.data.pop("integrationId")
with self.feature({"organizations:integrations-codeowners": True}):
response = self.get_error_response(self.org_slug, status_code=400, **self.data)
assert response.data == {"integrationId": ["This field is required."]}
def test_invalid_provider(self) -> None:
self.data.update(provider="unknown")
with self.feature({"organizations:integrations-codeowners": True}):
response = self.get_error_response(self.org_slug, status_code=400, **self.data)
assert response.data == {"provider": ['"unknown" is not a valid choice.']}
def test_create_existing_association(self) -> None:
self.external_user = self.create_external_user(
self.user, self.organization, self.integration, external_name=self.data["externalName"]
)
with self.feature({"organizations:integrations-codeowners": True}):
response = self.get_success_response(self.org_slug, status_code=200, **self.data)
assert response.data == {
**self.data,
"id": str(self.external_user.id),
"userId": str(self.user.id),
"integrationId": str(self.integration.id),
}
| ExternalUserTest |
python | PyCQA__pylint | tests/regrtest_data/max_inferable_limit_for_classes/other_funcs.py | {
"start": 134,
"end": 197
} | class ____(HasCacheKey, HasMemoized):
...
| MemoizedHasCacheKey |
python | fastai__fastai | fastai/callback/progress.py | {
"start": 3069,
"end": 4060
} | class ____(Callback):
"Update a graph of training and validation loss"
order,run_valid=65,False
def before_fit(self):
self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds")
if not(self.run): return
self.nb_batches = []
assert hasattr(self.learn, 'progress')
def after_train(self): self.nb_batches.append(self.train_iter)
def after_epoch(self):
"Plot validation loss in the pbar graph"
if not self.nb_batches: return
rec = self.learn.recorder
iters = range_of(rec.losses)
val_losses = [v[1] for v in rec.values]
x_bounds = (0, (self.n_epoch - len(self.nb_batches)) * self.nb_batches[0] + len(rec.losses))
y_bounds = (0, max((max(Tensor(rec.losses)), max(Tensor(val_losses)))))
self.progress.mbar.update_graph([(iters, rec.losses), (self.nb_batches, val_losses)], x_bounds, y_bounds)
# %% ../../nbs/16_callback.progress.ipynb 26
| ShowGraphCallback |
python | tensorflow__tensorflow | tensorflow/python/tpu/preempted_hook.py | {
"start": 1680,
"end": 3217
} | class ____(threading.Thread):
"""A thread that polls the state of a TPU node.
When the node transitions into a TERMINAL state (PREEMPTED, TERMINATED)
that's considered as not recoverable by the underlying infrastructure,
it attempts to close the session, and exits the entire process if the
session.close() stucks.
"""
def __init__(self, cluster, session):
super(_TPUPollingThread, self).__init__()
self.daemon = True
self._running = True
self._session_closed = False
self._cluster = cluster
self._session = session
self._interval = 30
# Some of the Google API libraries are quite chatty, so disable them.
for name in ['googleapiclient.discovery', 'oauth2client.client']:
_logging.getLogger(name).setLevel(_logging.WARNING)
def stop(self):
self._running = False
self._session_closed = True
self.join()
def run(self):
if not tpu_cluster_resolver.is_running_in_gce():
logging.warning(
'TPUPollingThread is running in a non-GCE environment, exiting...')
self._running = False
return
while self._running:
recoverable = self._cluster._cloud_tpu_client.recoverable() # pylint: disable=protected-access
if not recoverable:
logging.warning(
'TPUPollingThread found TPU %s in state %s',
self._cluster._tpu, self._cluster._cloud_tpu_client.state()) # pylint: disable=protected-access
os._exit(1) # pylint: disable=protected-access
time.sleep(self._interval)
| _TPUPollingThread |
python | TheAlgorithms__Python | neural_network/back_propagation_neural_network.py | {
"start": 499,
"end": 3067
} | class ____:
"""
Layers of BP neural network
"""
def __init__(
self, units, activation=None, learning_rate=None, is_input_layer=False
):
"""
common connected layer of bp network
:param units: numbers of neural units
:param activation: activation function
:param learning_rate: learning rate for paras
:param is_input_layer: whether it is input layer or not
"""
self.units = units
self.weight = None
self.bias = None
self.activation = activation
if learning_rate is None:
learning_rate = 0.3
self.learn_rate = learning_rate
self.is_input_layer = is_input_layer
def initializer(self, back_units):
rng = np.random.default_rng()
self.weight = np.asmatrix(rng.normal(0, 0.5, (self.units, back_units)))
self.bias = np.asmatrix(rng.normal(0, 0.5, self.units)).T
if self.activation is None:
self.activation = sigmoid
def cal_gradient(self):
# activation function may be sigmoid or linear
if self.activation == sigmoid:
gradient_mat = np.dot(self.output, (1 - self.output).T)
gradient_activation = np.diag(np.diag(gradient_mat))
else:
gradient_activation = 1
return gradient_activation
def forward_propagation(self, xdata):
self.xdata = xdata
if self.is_input_layer:
# input layer
self.wx_plus_b = xdata
self.output = xdata
return xdata
else:
self.wx_plus_b = np.dot(self.weight, self.xdata) - self.bias
self.output = self.activation(self.wx_plus_b)
return self.output
def back_propagation(self, gradient):
gradient_activation = self.cal_gradient() # i * i 维
gradient = np.asmatrix(np.dot(gradient.T, gradient_activation))
self._gradient_weight = np.asmatrix(self.xdata)
self._gradient_bias = -1
self._gradient_x = self.weight
self.gradient_weight = np.dot(gradient.T, self._gradient_weight.T)
self.gradient_bias = gradient * self._gradient_bias
self.gradient = np.dot(gradient, self._gradient_x).T
# upgrade: the Negative gradient direction
self.weight = self.weight - self.learn_rate * self.gradient_weight
self.bias = self.bias - self.learn_rate * self.gradient_bias.T
# updates the weights and bias according to learning rate (0.3 if undefined)
return self.gradient
| DenseLayer |
python | kamyu104__LeetCode-Solutions | Python/permutations-iv.py | {
"start": 47,
"end": 824
} | class ____(object):
def permute(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[int]
"""
result = []
cnt = [1]*n
for i in xrange(len(cnt)-1):
cnt[i+1] = min(cnt[i]*((i+2)//2), k)
lookup = [False]*n
for i in xrange(n):
for j in xrange(n):
if not (not lookup[j] and ((i == 0 and n%2 == 0) or (j+1)%2 == (1 if not result else (result[-1]%2)^1))):
continue
if k <= cnt[n-1-i]:
break
k -= cnt[n-1-i]
else:
return []
lookup[j] = True
result.append(j+1)
return result
# Time: O(n^2)
# Space: O(n)
# combinatorics
| Solution |
python | run-llama__llama_index | llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/agent.py | {
"start": 997,
"end": 1055
} | class ____(Event):
messages: List[ChatMessage]
| LoopEvent |
python | PrefectHQ__prefect | tests/server/schemas/test_actions.py | {
"start": 15647,
"end": 17525
} | class ____:
@pytest.mark.parametrize(
"schema_type",
[DeploymentScheduleCreate, DeploymentScheduleUpdate],
)
@pytest.mark.parametrize(
"max_scheduled_runs,expected_error_substr",
[
(
420000,
f"be less than or equal to {PREFECT_DEPLOYMENT_SCHEDULE_MAX_SCHEDULED_RUNS.value()}",
),
],
)
def test_deployment_schedule_validation_error(
self, schema_type, max_scheduled_runs, expected_error_substr
):
with pytest.raises(ValueError, match=expected_error_substr):
schema_type(
schedule=CronSchedule(cron="0 0 * * *"),
max_scheduled_runs=max_scheduled_runs,
)
@pytest.mark.parametrize(
"schema_type",
[DeploymentScheduleCreate, DeploymentScheduleUpdate],
)
@pytest.mark.parametrize(
"max_scheduled_runs",
[-1, 0],
)
def test_deployment_schedule_validation_error_invalid_max_scheduled_runs(
self, schema_type, max_scheduled_runs
):
with pytest.raises(ValidationError):
schema_type(
schedule=CronSchedule(cron="0 0 * * *"),
max_scheduled_runs=max_scheduled_runs,
)
@pytest.mark.parametrize(
"schema_type",
[DeploymentScheduleCreate, DeploymentScheduleUpdate],
)
@pytest.mark.parametrize(
"max_scheduled_runs",
[1, PREFECT_DEPLOYMENT_SCHEDULE_MAX_SCHEDULED_RUNS.value()],
)
def test_deployment_schedule_validation_success(
self, schema_type, max_scheduled_runs
):
schedule = schema_type(
schedule=CronSchedule(cron="0 0 * * *"),
max_scheduled_runs=max_scheduled_runs,
)
assert schedule.max_scheduled_runs == max_scheduled_runs
| TestDeploymentScheduleValidation |
python | doocs__leetcode | solution/1500-1599/1559.Detect Cycles in 2D Grid/Solution.py | {
"start": 0,
"end": 960
} | class ____:
def containsCycle(self, grid: List[List[str]]) -> bool:
m, n = len(grid), len(grid[0])
vis = [[False] * n for _ in range(m)]
dirs = (-1, 0, 1, 0, -1)
for i, row in enumerate(grid):
for j, x in enumerate(row):
if vis[i][j]:
continue
vis[i][j] = True
q = [(i, j, -1, -1)]
while q:
x, y, px, py = q.pop()
for dx, dy in pairwise(dirs):
nx, ny = x + dx, y + dy
if 0 <= nx < m and 0 <= ny < n:
if grid[nx][ny] != grid[i][j] or (nx == px and ny == py):
continue
if vis[nx][ny]:
return True
vis[nx][ny] = True
q.append((nx, ny, x, y))
return False
| Solution |
python | django__django | tests/admin_views/admin.py | {
"start": 23545,
"end": 24125
} | class ____(admin.ModelAdmin):
inlines = [
RelatedPrepopulatedInline1,
RelatedPrepopulatedInline2,
RelatedPrepopulatedInline3,
RelatedPrepopulatedStackedInlineNoFieldsets,
]
fieldsets = (
(
None,
{"fields": (("pubdate", "status"), ("name", "slug1", "slug2", "slug3"))},
),
)
formfield_overrides = {models.CharField: {"strip": False}}
prepopulated_fields = {
"slug1": ["name", "pubdate"],
"slug2": ["status", "name"],
"slug3": ["name"],
}
| MainPrepopulatedAdmin |
python | sympy__sympy | sympy/physics/biomechanics/curve.py | {
"start": 25485,
"end": 32778
} | class ____(CharacteristicCurveFunction):
r"""Inverse passive muscle fiber force-length curve based on De Groote et
al., 2016 [1]_.
Explanation
===========
Gives the normalized muscle fiber length that produces a specific normalized
passive muscle fiber force.
The function is defined by the equation:
${fl^M_{pas}}^{-1} = \frac{c_0 \log{\left(\exp{c_1} - 1\right)fl^M_pas + 1}}{c_1} + 1$
with constant values of $c_0 = 0.6$ and $c_1 = 4.0$. This function is the
exact analytical inverse of the related tendon force-length curve
``FiberForceLengthPassiveDeGroote2016``.
While it is possible to change the constant values, these were carefully
selected in the original publication to give the characteristic curve
specific and required properties. For example, the function produces a
passive fiber force very close to 0 for all normalized fiber lengths
between 0 and 1.
Examples
========
The preferred way to instantiate
:class:`FiberForceLengthPassiveInverseDeGroote2016` is using the
:meth:`~.with_defaults` constructor because this will automatically populate the
constants within the characteristic curve equation with the floating point
values from the original publication. This constructor takes a single
argument corresponding to the normalized passive muscle fiber length-force
component of the muscle fiber force. We'll create a :class:`~.Symbol` called
``fl_M_pas`` to represent this.
>>> from sympy import Symbol
>>> from sympy.physics.biomechanics import FiberForceLengthPassiveInverseDeGroote2016
>>> fl_M_pas = Symbol('fl_M_pas')
>>> l_M_tilde = FiberForceLengthPassiveInverseDeGroote2016.with_defaults(fl_M_pas)
>>> l_M_tilde
FiberForceLengthPassiveInverseDeGroote2016(fl_M_pas, 0.6, 4.0)
It's also possible to populate the two constants with your own values too.
>>> from sympy import symbols
>>> c0, c1 = symbols('c0 c1')
>>> l_M_tilde = FiberForceLengthPassiveInverseDeGroote2016(fl_M_pas, c0, c1)
>>> l_M_tilde
FiberForceLengthPassiveInverseDeGroote2016(fl_M_pas, c0, c1)
To inspect the actual symbolic expression that this function represents,
we can call the :meth:`~.doit` method on an instance. We'll use the keyword
argument ``evaluate=False`` as this will keep the expression in its
canonical form and won't simplify any constants.
>>> l_M_tilde.doit(evaluate=False)
c0*log(1 + fl_M_pas*(exp(c1) - 1))/c1 + 1
The function can also be differentiated. We'll differentiate with respect
to fl_M_pas using the ``diff`` method on an instance with the single positional
argument ``fl_M_pas``.
>>> l_M_tilde.diff(fl_M_pas)
c0*(exp(c1) - 1)/(c1*(fl_M_pas*(exp(c1) - 1) + 1))
References
==========
.. [1] De Groote, F., Kinney, A. L., Rao, A. V., & Fregly, B. J., Evaluation
of direct collocation optimal control problem formulations for
solving the muscle redundancy problem, Annals of biomedical
engineering, 44(10), (2016) pp. 2922-2936
"""
@classmethod
def with_defaults(cls, fl_M_pas):
r"""Recommended constructor that will use the published constants.
Explanation
===========
Returns a new instance of the inverse muscle fiber passive force-length
function using the four constant values specified in the original
publication.
These have the values:
$c_0 = 0.6$
$c_1 = 4.0$
Parameters
==========
fl_M_pas : Any (sympifiable)
Normalized passive muscle fiber force as a function of muscle fiber
length.
"""
c0 = Float('0.6')
c1 = Float('4.0')
return cls(fl_M_pas, c0, c1)
@classmethod
def eval(cls, fl_M_pas, c0, c1):
"""Evaluation of basic inputs.
Parameters
==========
fl_M_pas : Any (sympifiable)
Normalized passive muscle fiber force.
c0 : Any (sympifiable)
The first constant in the characteristic equation. The published
value is ``0.6``.
c1 : Any (sympifiable)
The second constant in the characteristic equation. The published
value is ``4.0``.
"""
pass
def _eval_evalf(self, prec):
"""Evaluate the expression numerically using ``evalf``."""
return self.doit(deep=False, evaluate=False)._eval_evalf(prec)
def doit(self, deep=True, evaluate=True, **hints):
"""Evaluate the expression defining the function.
Parameters
==========
deep : bool
Whether ``doit`` should be recursively called. Default is ``True``.
evaluate : bool.
Whether the SymPy expression should be evaluated as it is
constructed. If ``False``, then no constant folding will be
conducted which will leave the expression in a more numerically-
stable for values of ``l_T_tilde`` that correspond to a sensible
operating range for a musculotendon. Default is ``True``.
**kwargs : dict[str, Any]
Additional keyword argument pairs to be recursively passed to
``doit``.
"""
fl_M_pas, *constants = self.args
if deep:
hints['evaluate'] = evaluate
fl_M_pas = fl_M_pas.doit(deep=deep, **hints)
c0, c1 = [c.doit(deep=deep, **hints) for c in constants]
else:
c0, c1 = constants
if evaluate:
return c0*log(fl_M_pas*(exp(c1) - 1) + 1)/c1 + 1
return c0*log(UnevaluatedExpr(fl_M_pas*(exp(c1) - 1)) + 1)/c1 + 1
def fdiff(self, argindex=1):
"""Derivative of the function with respect to a single argument.
Parameters
==========
argindex : int
The index of the function's arguments with respect to which the
derivative should be taken. Argument indexes start at ``1``.
Default is ``1``.
"""
fl_M_pas, c0, c1 = self.args
if argindex == 1:
return c0*(exp(c1) - 1)/(c1*(fl_M_pas*(exp(c1) - 1) + 1))
elif argindex == 2:
return log(fl_M_pas*(exp(c1) - 1) + 1)/c1
elif argindex == 3:
return (
c0*fl_M_pas*exp(c1)/(c1*(fl_M_pas*(exp(c1) - 1) + 1))
- c0*log(fl_M_pas*(exp(c1) - 1) + 1)/c1**2
)
raise ArgumentIndexError(self, argindex)
def inverse(self, argindex=1):
"""Inverse function.
Parameters
==========
argindex : int
Value to start indexing the arguments at. Default is ``1``.
"""
return FiberForceLengthPassiveDeGroote2016
def _latex(self, printer):
"""Print a LaTeX representation of the function defining the curve.
Parameters
==========
printer : Printer
The printer to be used to print the LaTeX string representation.
"""
fl_M_pas = self.args[0]
_fl_M_pas = printer._print(fl_M_pas)
return r'\left( \operatorname{fl}^M_{pas} \right)^{-1} \left( %s \right)' % _fl_M_pas
| FiberForceLengthPassiveInverseDeGroote2016 |
python | charliermarsh__ruff | crates/ty_completion_eval/truth/import-deprioritizes-type_check_only/module.py | {
"start": 85,
"end": 221
} | class ____: pass
@type_check_only
def unique_prefix_apple() -> None: pass
def unique_prefix_azurous() -> None: pass
| UniquePrefixAzurous |
python | pytorch__pytorch | test/distributed/checkpoint/test_file_system_checkpoint.py | {
"start": 3228,
"end": 5141
} | class ____(TestCase):
def test_read_write_only_tensor(self) -> None:
with tempfile.TemporaryDirectory() as path:
state_dict_to_save = MyTestModule().state_dict()
fs_writer = FileSystemWriter(path=path)
save_state_dict(
state_dict=state_dict_to_save,
storage_writer=fs_writer,
no_dist=True,
)
state_dict_to_load_to = MyTestModule().state_dict()
with self.assertRaises(AssertionError):
assert_state_dict_equal(self, state_dict_to_load_to, state_dict_to_save)
# Load from file without any resharding
fs_reader = FileSystemReader(path=path)
load_state_dict(
state_dict=state_dict_to_load_to,
storage_reader=fs_reader,
no_dist=True,
)
assert_state_dict_equal(self, state_dict_to_load_to, state_dict_to_save)
with tempfile.TemporaryDirectory() as path:
state_dict_to_save = MyTestModule().state_dict()
fs_writer = FileSystemWriter(path=path, single_file_per_rank=True)
save_state_dict(
state_dict=state_dict_to_save,
storage_writer=fs_writer,
no_dist=True,
)
state_dict_to_load_to = MyTestModule().state_dict()
with self.assertRaises(AssertionError):
assert_state_dict_equal(self, state_dict_to_load_to, state_dict_to_save)
# Load from file without any resharding
fs_reader = FileSystemReader(path=path)
load_state_dict(
state_dict=state_dict_to_load_to,
storage_reader=fs_reader,
no_dist=True,
)
assert_state_dict_equal(self, state_dict_to_load_to, state_dict_to_save)
| TestDistributedStateDictSaveLoad |
python | google__pytype | pytype/tests/test_utils.py | {
"start": 659,
"end": 2029
} | class ____:
"""Context handler for creating temporary directories."""
def __enter__(self):
self.path = compatible_tempfile.mkdtemp()
return self
def create_directory(self, filename):
"""Create a subdirectory in the temporary directory."""
path = path_utils.join(self.path, filename)
makedirs(path)
return path
def create_file(self, filename, indented_data=None):
"""Create a file in the temporary directory. Dedents the data if needed."""
filedir, filename = path_utils.split(filename)
if filedir:
self.create_directory(filedir)
path = path_utils.join(self.path, filedir, filename)
if isinstance(indented_data, bytes):
# This is binary data rather than text.
mode = "wb"
data = indented_data
else:
mode = "w"
data = textwrap.dedent(indented_data) if indented_data else indented_data
with open(path, mode) as fi:
if data:
fi.write(data)
return path
def delete_file(self, filename):
os.unlink(path_utils.join(self.path, filename))
def __exit__(self, error_type, value, tb):
shutil.rmtree(path=self.path)
return False # reraise any exceptions
def __getitem__(self, filename):
"""Get the full path for an entry in this directory."""
return path_utils.join(self.path, filename)
@dataclasses.dataclass(eq=True, frozen=True)
| Tempdir |
python | django__django | django/template/defaulttags.py | {
"start": 4710,
"end": 8411
} | class ____(Node):
child_nodelists = ("nodelist_loop", "nodelist_empty")
def __init__(
self, loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty=None
):
self.loopvars = loopvars
self.sequence = sequence
self.is_reversed = is_reversed
self.nodelist_loop = nodelist_loop
if nodelist_empty is None:
self.nodelist_empty = NodeList()
else:
self.nodelist_empty = nodelist_empty
def __repr__(self):
reversed_text = " reversed" if self.is_reversed else ""
return "<%s: for %s in %s, tail_len: %d%s>" % (
self.__class__.__name__,
", ".join(self.loopvars),
self.sequence,
len(self.nodelist_loop),
reversed_text,
)
def render(self, context):
if "forloop" in context:
parentloop = context["forloop"]
else:
parentloop = {}
with context.push():
values = self.sequence.resolve(context, ignore_failures=True)
if values is None:
values = []
if not hasattr(values, "__len__"):
values = list(values)
len_values = len(values)
if len_values < 1:
return self.nodelist_empty.render(context)
nodelist = []
if self.is_reversed:
values = reversed(values)
num_loopvars = len(self.loopvars)
unpack = num_loopvars > 1
# Create a forloop value in the context. We'll update counters on
# each iteration just below.
loop_dict = context["forloop"] = {
"parentloop": parentloop,
"length": len_values,
}
for i, item in enumerate(values):
# Shortcuts for current loop iteration number.
loop_dict["counter0"] = i
loop_dict["counter"] = i + 1
# Reverse counter iteration numbers.
loop_dict["revcounter"] = len_values - i
loop_dict["revcounter0"] = len_values - i - 1
# Boolean values designating first and last times through loop.
loop_dict["first"] = i == 0
loop_dict["last"] = i == len_values - 1
pop_context = False
if unpack:
# If there are multiple loop variables, unpack the item
# into them.
try:
len_item = len(item)
except TypeError: # not an iterable
len_item = 1
# Check loop variable count before unpacking
if num_loopvars != len_item:
raise ValueError(
"Need {} values to unpack in for loop; got {}. ".format(
num_loopvars, len_item
),
)
unpacked_vars = dict(zip(self.loopvars, item))
pop_context = True
context.update(unpacked_vars)
else:
context[self.loopvars[0]] = item
for node in self.nodelist_loop:
nodelist.append(node.render_annotated(context))
if pop_context:
# Pop the loop variables pushed on to the context to avoid
# the context ending up in an inconsistent state when other
# tags (e.g., include and with) push data to context.
context.pop()
return mark_safe("".join(nodelist))
| ForNode |
python | getsentry__sentry | tests/sentry/lang/javascript/test_errorlocale.py | {
"start": 204,
"end": 3961
} | class ____(TestCase):
def test_basic_translation(self) -> None:
actual = "Type mismatch"
expected = translate_message("Typenkonflikt")
assert actual == expected
def test_unicode_translation(self) -> None:
expected = "Division by zero"
actual = translate_message("Divisi\xf3n por cero")
assert actual == expected
def test_same_translation(self) -> None:
expected = "Out of memory"
actual = translate_message("Out of memory")
assert actual == expected
def test_unknown_translation(self) -> None:
expected = "Some unknown message"
actual = translate_message("Some unknown message")
assert actual == expected
def test_translation_with_type(self) -> None:
expected = "RangeError: Subscript out of range"
actual = translate_message("RangeError: Indeks poza zakresem")
assert actual == expected
def test_translation_with_type_and_colon(self) -> None:
expected = "RangeError: Cannot define property: object is not extensible"
actual = translate_message(
"RangeError: Nie mo\u017cna zdefiniowa\u0107 w\u0142a\u015bciwo\u015bci: obiekt nie jest rozszerzalny"
)
assert actual == expected
def test_interpolated_translation(self) -> None:
expected = "Type 'foo' not found"
actual = translate_message("Nie odnaleziono typu \u201efoo\u201d")
assert actual == expected
def test_interpolated_translation_with_colon(self) -> None:
expected = "'this' is not of expected type: foo"
actual = translate_message(
"Typ obiektu \u201ethis\u201d jest inny ni\u017c oczekiwany: foo"
)
assert actual == expected
def test_interpolated_translation_with_colon_in_front(self) -> None:
expected = "foo: an unexpected failure occurred while trying to obtain metadata information"
actual = translate_message(
"foo: wyst\u0105pi\u0142 nieoczekiwany b\u0142\u0105d podczas pr\xf3by uzyskania informacji o metadanych"
)
assert actual == expected
def test_interpolated_translation_with_type(self) -> None:
expected = "TypeError: Type 'foo' not found"
actual = translate_message("TypeError: Nie odnaleziono typu \u201efoo\u201d")
assert actual == expected
def test_interpolated_translation_with_type_and_colon(self) -> None:
expected = "ReferenceError: Cannot modify property 'foo': 'length' is not writable"
actual = translate_message(
"ReferenceError: Nie mo\u017cna zmodyfikowa\u0107 w\u0142a\u015bciwo\u015bci \u201efoo\u201d: warto\u015b\u0107 \u201elength\u201d jest niezapisywalna"
)
assert actual == expected
def test_translate_exception(self) -> None:
data = {
"logentry": {"message": "Typenkonflikt", "formatted": "Typenkonflikt"},
"exception": {"values": [{"value": "Typenkonflikt"}, {"value": "Typenkonflikt"}]},
}
translate_exception(data)
assert data == {
"logentry": {"message": "Type mismatch", "formatted": "Type mismatch"},
"exception": {"values": [{"value": "Type mismatch"}, {"value": "Type mismatch"}]},
}
def test_translate_exception_missing(self) -> None:
data: dict[str, Any] = {}
translate_exception(data)
assert data == {}
def test_translate_exception_none(self) -> None:
expected = {
"logentry": {"message": None, "formatted": None},
"exception": {"values": [None, {"value": None}]},
}
actual = deepcopy(expected)
translate_exception(actual)
assert actual == expected
| ErrorLocaleTest |
python | falconry__falcon | falcon/util/structures.py | {
"start": 1400,
"end": 4209
} | class ____(MutableMapping): # pragma: no cover
"""A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.abc.MutableMapping`` as well as dict's `copy`. Also
provides `lower_items`.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, and ``items()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive:
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data: Iterable[tuple[str, Any]] | None = None, **kwargs: Any):
self._store: dict[str, tuple[str, Any]] = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key: str, value: Any) -> None:
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key: str) -> Any:
return self._store[key.lower()][1]
def __delitem__(self, key: str) -> None:
del self._store[key.lower()]
def __iter__(self) -> Iterator[str]:
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self) -> int:
return len(self._store)
def lower_items(self) -> Iterator[tuple[str, Any]]:
"""Like iteritems(), but with all lowercase keys."""
return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items())
def __eq__(self, other: object) -> bool:
if isinstance(other, Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self) -> CaseInsensitiveDict:
return CaseInsensitiveDict(self._store.values())
def __repr__(self) -> str:
return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
# NOTE(vytas): Although Context is effectively implementing the MutableMapping
# interface, we choose not to subclass MutableMapping to stress the fact that
# Context is, by design, a bare class, and the mapping interface may be
# removed in a future Falcon release.
| CaseInsensitiveDict |
python | django__django | tests/admin_inlines/admin.py | {
"start": 2757,
"end": 2881
} | class ____(PhotoInlineMixin, admin.StackedInline):
fieldsets = []
classes = ["collapse"]
| PhotoStackedCollapsibleInline |
python | py-pdf__pypdf | pypdf/errors.py | {
"start": 1124,
"end": 1329
} | class ____(PdfReadError):
"""
Raised when a PDF file that has been encrypted
(meaning it requires a password to be accessed) has not been successfully
decrypted.
"""
| FileNotDecryptedError |
python | celery__celery | t/unit/worker/test_consumer.py | {
"start": 38958,
"end": 40528
} | class ____:
def test_start_no_replies(self):
c = Mock()
c.app.connection_for_read = _amqp_connection()
mingle = Mingle(c)
I = c.app.control.inspect.return_value = Mock()
I.hello.return_value = {}
mingle.start(c)
def test_start(self):
c = Mock()
c.app.connection_for_read = _amqp_connection()
mingle = Mingle(c)
assert mingle.enabled
Aig = LimitedSet()
Big = LimitedSet()
Aig.add('Aig-1')
Aig.add('Aig-2')
Big.add('Big-1')
I = c.app.control.inspect.return_value = Mock()
I.hello.return_value = {
'A@example.com': {
'clock': 312,
'revoked': Aig._data,
},
'B@example.com': {
'clock': 29,
'revoked': Big._data,
},
'C@example.com': {
'error': 'unknown method',
},
}
our_revoked = c.controller.state.revoked = LimitedSet()
mingle.start(c)
I.hello.assert_called_with(c.hostname, our_revoked._data)
c.app.clock.adjust.assert_has_calls([
call(312), call(29),
], any_order=True)
assert 'Aig-1' in our_revoked
assert 'Aig-2' in our_revoked
assert 'Big-1' in our_revoked
def _amqp_connection():
connection = ContextMock(name='Connection')
connection.return_value = ContextMock(name='connection')
connection.return_value.transport.driver_type = 'amqp'
return connection
| test_Mingle |
python | aio-libs__aiohttp | aiohttp/http_exceptions.py | {
"start": 1000,
"end": 1283
} | class ____(HttpProcessingError):
code = 400
message = "Bad Request"
def __init__(
self, message: str, *, headers: CIMultiDict[str] | None = None
) -> None:
super().__init__(message=message, headers=headers)
self.args = (message,)
| BadHttpMessage |
python | pyinstaller__pyinstaller | PyInstaller/lib/modulegraph/modulegraph.py | {
"start": 20461,
"end": 21023
} | class ____(str):
"""
Placeholder aliasing an existing source module to a non-existent target
module (i.e., the desired alias).
For obscure reasons, this class subclasses `str`. Each instance of this
class is the fully-qualified name of the existing source module being
aliased. Unlike the related `AliasNode` class, instances of this class are
_not_ actual nodes and hence _not_ added to the graph; they only facilitate
communication between the `ModuleGraph.alias_module()` and
`ModuleGraph.find_node()` methods.
"""
| Alias |
python | doocs__leetcode | lcof2/剑指 Offer II 025. 链表中的两数相加/Solution.py | {
"start": 151,
"end": 720
} | class ____:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
s1, s2 = [], []
while l1:
s1.append(l1.val)
l1 = l1.next
while l2:
s2.append(l2.val)
l2 = l2.next
carry, dummy = 0, ListNode()
while s1 or s2 or carry:
carry += (0 if not s1 else s1.pop()) + (0 if not s2 else s2.pop())
# 创建结点,利用头插法将结点插入链表
node = ListNode(carry % 10, dummy.next)
dummy.next = node
carry //= 10
return dummy.next
| Solution |
python | django__django | tests/migrations/migrations_test_apps/with_generic_model/migrations/0001_initial.py | {
"start": 58,
"end": 1058
} | class ____(migrations.Migration):
initial = True
operations = [
migrations.CreateModel(
name="GenericModel",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
],
bases=(typing.Generic, models.Model),
),
migrations.CreateModel(
name="GenericModelPEP695",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
],
bases=(models.Model, typing.Generic),
),
]
| Migration |
python | pandas-dev__pandas | pandas/core/strings/object_array.py | {
"start": 607,
"end": 16563
} | class ____:
"""
String Methods operating on object-dtype ndarrays.
"""
def __len__(self) -> int:
# For typing, _str_map relies on the object being sized.
raise NotImplementedError
def _str_getitem(self, key):
if isinstance(key, slice):
return self._str_slice(start=key.start, stop=key.stop, step=key.step)
else:
return self._str_get(key)
def _str_map(
self,
f,
na_value=lib.no_default,
dtype: NpDtype | None = None,
convert: bool = True,
):
"""
Map a callable over valid elements of the array.
Parameters
----------
f : Callable
A function to call on each non-NA element.
na_value : Scalar, optional
The value to set for NA values. Might also be used for the
fill value if the callable `f` raises an exception.
This defaults to ``self.dtype.na_value`` which is ``np.nan``
for object-dtype and Categorical and ``pd.NA`` for StringArray.
dtype : Dtype, optional
The dtype of the result array.
convert : bool, default True
Whether to call `maybe_convert_objects` on the resulting ndarray
"""
if dtype is None:
dtype = np.dtype("object")
if na_value is lib.no_default:
na_value = self.dtype.na_value # type: ignore[attr-defined]
if not len(self):
return np.array([], dtype=dtype)
arr = np.asarray(self, dtype=object)
mask = isna(arr)
map_convert = convert and not np.all(mask)
try:
result = lib.map_infer_mask(
arr, f, mask.view(np.uint8), convert=map_convert
)
except (TypeError, AttributeError) as err:
# Reraise the exception if callable `f` got wrong number of args.
# The user may want to be warned by this, instead of getting NaN
p_err = (
r"((takes)|(missing)) (?(2)from \d+ to )?\d+ "
r"(?(3)required )positional arguments?"
)
if len(err.args) >= 1 and re.search(p_err, err.args[0]):
# FIXME: this should be totally avoidable
raise err
def g(x):
# This type of fallback behavior can be removed once
# we remove object-dtype .str accessor.
try:
return f(x)
except (TypeError, AttributeError):
return na_value
return self._str_map(g, na_value=na_value, dtype=dtype)
if not isinstance(result, np.ndarray):
return result
if na_value is not np.nan:
np.putmask(result, mask, na_value)
if convert and result.dtype == object:
result = lib.maybe_convert_objects(result)
return result
def _str_count(self, pat, flags: int = 0):
regex = re.compile(pat, flags=flags)
f = lambda x: len(regex.findall(x))
return self._str_map(f, dtype="int64")
def _str_pad(
self,
width: int,
side: Literal["left", "right", "both"] = "left",
fillchar: str = " ",
):
if side == "left":
f = lambda x: x.rjust(width, fillchar)
elif side == "right":
f = lambda x: x.ljust(width, fillchar)
elif side == "both":
f = lambda x: x.center(width, fillchar)
else: # pragma: no cover
raise ValueError("Invalid side")
return self._str_map(f)
def _str_contains(
self,
pat,
case: bool = True,
flags: int = 0,
na=lib.no_default,
regex: bool = True,
):
validate_na_arg(na, name="na")
if regex:
if not case:
flags |= re.IGNORECASE
pat = re.compile(pat, flags=flags)
f = lambda x: pat.search(x) is not None
else:
if case:
f = lambda x: pat in x
else:
upper_pat = pat.upper()
f = lambda x: upper_pat in x.upper()
return self._str_map(f, na, dtype=np.dtype("bool"))
def _str_startswith(self, pat, na=lib.no_default):
validate_na_arg(na, name="na")
f = lambda x: x.startswith(pat)
return self._str_map(f, na_value=na, dtype=np.dtype(bool))
def _str_endswith(self, pat, na=lib.no_default):
validate_na_arg(na, name="na")
f = lambda x: x.endswith(pat)
return self._str_map(f, na_value=na, dtype=np.dtype(bool))
def _str_replace(
self,
pat: str | re.Pattern,
repl: str | Callable,
n: int = -1,
case: bool = True,
flags: int = 0,
regex: bool = True,
):
if case is False:
# add case flag, if provided
flags |= re.IGNORECASE
if regex or flags or callable(repl):
if not isinstance(pat, re.Pattern):
if regex is False:
pat = re.escape(pat)
pat = re.compile(pat, flags=flags)
n = n if n >= 0 else 0
f = lambda x: pat.sub(repl=repl, string=x, count=n)
else:
f = lambda x: x.replace(pat, repl, n)
return self._str_map(f, dtype=str)
def _str_repeat(self, repeats: int | Sequence[int]):
if lib.is_integer(repeats):
rint = cast(int, repeats)
def scalar_rep(x):
try:
return bytes.__mul__(x, rint)
except TypeError:
return str.__mul__(x, rint)
return self._str_map(scalar_rep, dtype=str)
else:
from pandas.core.arrays.string_ import BaseStringArray
def rep(x, r):
if x is libmissing.NA:
return x
try:
return bytes.__mul__(x, r)
except TypeError:
return str.__mul__(x, r)
result = libops.vec_binop(
np.asarray(self),
np.asarray(repeats, dtype=object),
rep,
)
if not isinstance(self, BaseStringArray):
return result
# Not going through map, so we have to do this here.
return type(self)._from_sequence(result, dtype=self.dtype)
def _str_match(
self,
pat: str | re.Pattern,
case: bool = True,
flags: int = 0,
na: Scalar | lib.NoDefault = lib.no_default,
):
if not case:
flags |= re.IGNORECASE
if isinstance(pat, re.Pattern):
# We need to check that flags matches pat.flags.
# pat.flags will have re.U regardless, so we need to add it here
# before checking for a match
flags = flags | re.U
if flags != pat.flags:
raise ValueError("Cannot pass flags that do not match pat.flags")
regex = pat
else:
regex = re.compile(pat, flags=flags)
f = lambda x: regex.match(x) is not None
return self._str_map(f, na_value=na, dtype=np.dtype(bool))
def _str_fullmatch(
self,
pat: str | re.Pattern,
case: bool = True,
flags: int = 0,
na: Scalar | lib.NoDefault = lib.no_default,
):
if not case:
flags |= re.IGNORECASE
regex = re.compile(pat, flags=flags)
f = lambda x: regex.fullmatch(x) is not None
return self._str_map(f, na_value=na, dtype=np.dtype(bool))
def _str_encode(self, encoding, errors: str = "strict"):
f = lambda x: x.encode(encoding, errors=errors)
return self._str_map(f, dtype=object)
def _str_find(self, sub, start: int = 0, end=None):
return self._str_find_(sub, start, end, side="left")
def _str_rfind(self, sub, start: int = 0, end=None):
return self._str_find_(sub, start, end, side="right")
def _str_find_(self, sub, start, end, side):
if side == "left":
method = "find"
elif side == "right":
method = "rfind"
else: # pragma: no cover
raise ValueError("Invalid side")
if end is None:
f = lambda x: getattr(x, method)(sub, start)
else:
f = lambda x: getattr(x, method)(sub, start, end)
return self._str_map(f, dtype="int64")
def _str_findall(self, pat, flags: int = 0):
regex = re.compile(pat, flags=flags)
return self._str_map(regex.findall, dtype="object")
def _str_get(self, i):
def f(x):
if isinstance(x, dict):
return x.get(i)
elif len(x) > i >= -len(x):
return x[i]
return self.dtype.na_value # type: ignore[attr-defined]
return self._str_map(f)
def _str_index(self, sub, start: int = 0, end=None):
if end:
f = lambda x: x.index(sub, start, end)
else:
f = lambda x: x.index(sub, start, end)
return self._str_map(f, dtype="int64")
def _str_rindex(self, sub, start: int = 0, end=None):
if end:
f = lambda x: x.rindex(sub, start, end)
else:
f = lambda x: x.rindex(sub, start, end)
return self._str_map(f, dtype="int64")
def _str_join(self, sep: str):
return self._str_map(sep.join)
def _str_partition(self, sep: str, expand):
result = self._str_map(lambda x: x.partition(sep), dtype="object")
return result
def _str_rpartition(self, sep: str, expand):
return self._str_map(lambda x: x.rpartition(sep), dtype="object")
def _str_len(self):
return self._str_map(len, dtype="int64")
def _str_slice(self, start=None, stop=None, step=None):
obj = slice(start, stop, step)
return self._str_map(lambda x: x[obj])
def _str_slice_replace(self, start=None, stop=None, repl=None):
if repl is None:
repl = ""
def f(x):
if x[start:stop] == "":
local_stop = start
else:
local_stop = stop
y = ""
if start is not None:
y += x[:start]
y += repl
if stop is not None:
y += x[local_stop:]
return y
return self._str_map(f)
def _str_split(
self,
pat: str | re.Pattern | None = None,
n=-1,
expand: bool = False,
regex: bool | None = None,
):
if pat is None:
if n is None or n == 0:
n = -1
f = lambda x: x.split(pat, n)
else:
new_pat: str | re.Pattern
if regex is True or isinstance(pat, re.Pattern):
new_pat = re.compile(pat)
elif regex is False:
new_pat = pat
# regex is None so link to old behavior #43563
else:
if len(pat) == 1:
new_pat = pat
else:
new_pat = re.compile(pat)
if isinstance(new_pat, re.Pattern):
if n is None or n == -1:
n = 0
f = lambda x: new_pat.split(x, maxsplit=n)
else:
if n is None or n == 0:
n = -1
f = lambda x: x.split(pat, n)
return self._str_map(f, dtype=object)
def _str_rsplit(self, pat=None, n=-1):
if n is None or n == 0:
n = -1
f = lambda x: x.rsplit(pat, n)
return self._str_map(f, dtype="object")
def _str_translate(self, table):
return self._str_map(lambda x: x.translate(table))
def _str_wrap(self, width: int, **kwargs):
kwargs["width"] = width
tw = textwrap.TextWrapper(**kwargs)
return self._str_map(lambda s: "\n".join(tw.wrap(s)))
def _str_get_dummies(self, sep: str = "|", dtype: NpDtype | None = None):
from pandas import Series
if dtype is None:
dtype = np.int64
arr = Series(self).fillna("")
try:
arr = sep + arr + sep
except (TypeError, NotImplementedError):
arr = sep + arr.astype(str) + sep
tags: set[str] = set()
for ts in Series(arr, copy=False).str.split(sep):
tags.update(ts)
tags2 = sorted(tags - {""})
_dtype = pandas_dtype(dtype)
dummies_dtype: NpDtype
if isinstance(_dtype, np.dtype):
dummies_dtype = _dtype
else:
dummies_dtype = np.bool_
dummies = np.empty((len(arr), len(tags2)), dtype=dummies_dtype, order="F")
def _isin(test_elements: str, element: str) -> bool:
return element in test_elements
for i, t in enumerate(tags2):
pat = sep + t + sep
dummies[:, i] = lib.map_infer(
arr.to_numpy(), functools.partial(_isin, element=pat)
)
return dummies, tags2
def _str_upper(self):
return self._str_map(lambda x: x.upper())
def _str_isalnum(self):
return self._str_map(str.isalnum, dtype="bool")
def _str_isalpha(self):
return self._str_map(str.isalpha, dtype="bool")
def _str_isascii(self):
return self._str_map(str.isascii, dtype="bool")
def _str_isdecimal(self):
return self._str_map(str.isdecimal, dtype="bool")
def _str_isdigit(self):
return self._str_map(str.isdigit, dtype="bool")
def _str_islower(self):
return self._str_map(str.islower, dtype="bool")
def _str_isnumeric(self):
return self._str_map(str.isnumeric, dtype="bool")
def _str_isspace(self):
return self._str_map(str.isspace, dtype="bool")
def _str_istitle(self):
return self._str_map(str.istitle, dtype="bool")
def _str_isupper(self):
return self._str_map(str.isupper, dtype="bool")
def _str_capitalize(self):
return self._str_map(str.capitalize)
def _str_casefold(self):
return self._str_map(str.casefold)
def _str_title(self):
return self._str_map(str.title)
def _str_swapcase(self):
return self._str_map(str.swapcase)
def _str_lower(self):
return self._str_map(str.lower)
def _str_normalize(self, form):
f = lambda x: unicodedata.normalize(form, x)
return self._str_map(f)
def _str_strip(self, to_strip=None):
return self._str_map(lambda x: x.strip(to_strip))
def _str_lstrip(self, to_strip=None):
return self._str_map(lambda x: x.lstrip(to_strip))
def _str_rstrip(self, to_strip=None):
return self._str_map(lambda x: x.rstrip(to_strip))
def _str_removeprefix(self, prefix: str):
return self._str_map(lambda x: x.removeprefix(prefix))
def _str_removesuffix(self, suffix: str):
return self._str_map(lambda x: x.removesuffix(suffix))
def _str_extract(self, pat: str, flags: int = 0, expand: bool = True):
regex = re.compile(pat, flags=flags)
na_value = self.dtype.na_value # type: ignore[attr-defined]
if not expand:
def g(x):
m = regex.search(x)
return m.groups()[0] if m else na_value
return self._str_map(g, convert=False)
empty_row = [na_value] * regex.groups
def f(x):
if not isinstance(x, str):
return empty_row
m = regex.search(x)
if m:
return [na_value if item is None else item for item in m.groups()]
else:
return empty_row
return [f(val) for val in np.asarray(self)]
def _str_zfill(self, width: int):
return self._str_map(lambda x: x.zfill(width))
| ObjectStringArrayMixin |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/distributions/bijector_test.py | {
"start": 7024,
"end": 7468
} | class ____(bijector.Bijector):
"""Only used for jacobian calculations."""
def __init__(self, forward_min_event_ndims=0):
super().__init__(
validate_args=False,
is_constant_jacobian=False,
forward_min_event_ndims=forward_min_event_ndims,
name="exp")
def _inverse_log_det_jacobian(self, y):
return -math_ops.log(y)
def _forward_log_det_jacobian(self, x):
return math_ops.log(x)
| ExpOnlyJacobian |
python | wandb__wandb | wandb/sdk/artifacts/_generated/artifact_collection_membership_file_urls.py | {
"start": 659,
"end": 1017
} | class ____(GQLResult):
typename__: Typename[
Literal["ArtifactCollection", "ArtifactPortfolio", "ArtifactSequence"]
]
artifact_membership: Optional[
ArtifactCollectionMembershipFileUrlsProjectArtifactCollectionArtifactMembership
] = Field(alias="artifactMembership")
| ArtifactCollectionMembershipFileUrlsProjectArtifactCollection |
python | sqlalchemy__sqlalchemy | test/dialect/sqlite/test_dialect.py | {
"start": 13360,
"end": 17471
} | class ____(fixtures.TablesTest):
__only_on__ = "sqlite"
__backend__ = True
run_create_tables = "each"
@classmethod
def define_tables(self, metadata):
meta = metadata
Table("created", meta, Column("foo", Integer), Column("bar", String))
Table("local_only", meta, Column("q", Integer), Column("p", Integer))
Table(
"created",
meta,
Column("id", Integer),
Column("name", String),
schema="test_schema",
)
Table(
"another_created",
meta,
Column("bat", Integer),
Column("hoho", String),
schema="test_schema",
)
def test_no_tables(self, connection):
tt = self.tables("test_schema.created", "test_schema.another_created")
for t in tt:
t.drop(connection)
insp = inspect(connection)
eq_(insp.get_table_names("test_schema"), [])
def test_column_names(self, connection):
insp = inspect(connection)
eq_(
[
d["name"]
for d in insp.get_columns("created", schema="test_schema")
],
["id", "name"],
)
eq_(
[d["name"] for d in insp.get_columns("created", schema=None)],
["foo", "bar"],
)
with expect_raises(exc.NoSuchTableError):
insp.get_columns("nonexistent", schema="test_schema")
with expect_raises(exc.NoSuchTableError):
insp.get_columns("another_created", schema=None)
with expect_raises(exc.NoSuchTableError):
insp.get_columns("local_only", schema="test_schema")
eq_([d["name"] for d in insp.get_columns("local_only")], ["q", "p"])
def test_table_names_present(self, connection):
insp = inspect(connection)
eq_(
set(insp.get_table_names("test_schema")),
{"created", "another_created"},
)
def test_table_names_system(self, connection):
insp = inspect(connection)
eq_(
set(insp.get_table_names("test_schema")),
{"created", "another_created"},
)
def test_schema_names(self, connection):
insp = inspect(connection)
eq_(insp.get_schema_names(), ["main", "test_schema"])
# implicitly creates a "temp" schema
connection.exec_driver_sql("select * from sqlite_temp_master")
# we're not including it
insp = inspect(connection)
eq_(insp.get_schema_names(), ["main", "test_schema"])
def test_reflect_system_table(self, connection):
meta = MetaData()
alt_master = Table(
"sqlite_master",
meta,
autoload_with=connection,
schema="test_schema",
)
assert len(alt_master.c) > 0
def test_reflect_user_table(self, connection):
m2 = MetaData()
c2 = Table("created", m2, autoload_with=connection)
eq_(len(c2.c), 2)
def test_crud(self, connection):
(ct,) = self.tables("test_schema.created")
connection.execute(ct.insert(), {"id": 1, "name": "foo"})
eq_(connection.execute(ct.select()).fetchall(), [(1, "foo")])
connection.execute(ct.update(), {"id": 2, "name": "bar"})
eq_(connection.execute(ct.select()).fetchall(), [(2, "bar")])
connection.execute(ct.delete())
eq_(connection.execute(ct.select()).fetchall(), [])
def test_col_targeting(self, connection):
(ct,) = self.tables("test_schema.created")
connection.execute(ct.insert(), {"id": 1, "name": "foo"})
row = connection.execute(ct.select()).first()
eq_(row._mapping["id"], 1)
eq_(row._mapping["name"], "foo")
def test_col_targeting_union(self, connection):
(ct,) = self.tables("test_schema.created")
connection.execute(ct.insert(), {"id": 1, "name": "foo"})
row = connection.execute(ct.select().union(ct.select())).first()
eq_(row._mapping["id"], 1)
eq_(row._mapping["name"], "foo")
| AttachedDBTest |
python | sqlalchemy__sqlalchemy | test/orm/test_transaction.py | {
"start": 73975,
"end": 75369
} | class ____(
JoinIntoAnExternalTransactionFixture
):
@testing.requires.compat_savepoints
def test_something_with_context_managers(self):
A = self.A
a1 = A()
with self.session.begin():
self.session.add(a1)
self.session.flush()
self._assert_count(1)
self.session.rollback()
self._assert_count(0)
a1 = A()
with self.session.begin():
self.session.add(a1)
self._assert_count(1)
a2 = A()
with self.session.begin():
self.session.add(a2)
self.session.flush()
self._assert_count(2)
self.session.rollback()
self._assert_count(1)
@testing.requires.compat_savepoints
def test_super_abusive_nesting(self):
session = self.session
for i in range(random.randint(5, 30)):
choice = random.randint(1, 3)
if choice == 1:
if session.in_transaction():
session.begin_nested()
else:
session.begin()
elif choice == 2:
session.rollback()
elif choice == 3:
session.commit()
session.connection()
# remaining nested / etc. are cleanly cleared out
session.close()
| CtxManagerJoinIntoAnExternalTransactionFixture |
python | python__mypy | mypy/types.py | {
"start": 49121,
"end": 49437
} | class ____(ProperType):
"""Placeholder for an erased type.
This is used during type inference. This has the special property that
it is ignored during type inference.
"""
__slots__ = ()
def accept(self, visitor: TypeVisitor[T]) -> T:
return visitor.visit_erased_type(self)
| ErasedType |
python | giampaolo__psutil | psutil/_pswindows.py | {
"start": 3748,
"end": 13719
} | class ____(enum.IntEnum):
IOPRIO_VERYLOW = 0
IOPRIO_LOW = 1
IOPRIO_NORMAL = 2
IOPRIO_HIGH = 3
globals().update(IOPriority.__members__)
pinfo_map = dict(
num_handles=0,
ctx_switches=1,
user_time=2,
kernel_time=3,
create_time=4,
num_threads=5,
io_rcount=6,
io_wcount=7,
io_rbytes=8,
io_wbytes=9,
io_count_others=10,
io_bytes_others=11,
num_page_faults=12,
peak_wset=13,
wset=14,
peak_paged_pool=15,
paged_pool=16,
peak_non_paged_pool=17,
non_paged_pool=18,
pagefile=19,
peak_pagefile=20,
mem_private=21,
)
# =====================================================================
# --- utils
# =====================================================================
@functools.lru_cache(maxsize=512)
def convert_dos_path(s):
r"""Convert paths using native DOS format like:
"\Device\HarddiskVolume1\Windows\systemew\file.txt" or
"\??\C:\Windows\systemew\file.txt"
into:
"C:\Windows\systemew\file.txt".
"""
if s.startswith('\\\\'):
return s
rawdrive = '\\'.join(s.split('\\')[:3])
if rawdrive in {"\\??\\UNC", "\\Device\\Mup"}:
rawdrive = '\\'.join(s.split('\\')[:5])
driveletter = '\\\\' + '\\'.join(s.split('\\')[3:5])
elif rawdrive.startswith('\\??\\'):
driveletter = s.split('\\')[2]
else:
driveletter = cext.QueryDosDevice(rawdrive)
remainder = s[len(rawdrive) :]
return os.path.join(driveletter, remainder)
@memoize
def getpagesize():
return cext.getpagesize()
# =====================================================================
# --- memory
# =====================================================================
def virtual_memory():
"""System virtual memory as a namedtuple."""
mem = cext.virtual_mem()
totphys, availphys, _totsys, _availsys = mem
total = totphys
avail = availphys
free = availphys
used = total - avail
percent = usage_percent((total - avail), total, round_=1)
return ntp.svmem(total, avail, percent, used, free)
def swap_memory():
"""Swap system memory as a (total, used, free, sin, sout) tuple."""
mem = cext.virtual_mem()
total_phys = mem[0]
total_system = mem[2]
# system memory (commit total/limit) is the sum of physical and swap
# thus physical memory values need to be subtracted to get swap values
total = total_system - total_phys
# commit total is incremented immediately (decrementing free_system)
# while the corresponding free physical value is not decremented until
# pages are accessed, so we can't use free system memory for swap.
# instead, we calculate page file usage based on performance counter
if total > 0:
percentswap = cext.swap_percent()
used = int(0.01 * percentswap * total)
else:
percentswap = 0.0
used = 0
free = total - used
percent = round(percentswap, 1)
return ntp.sswap(total, used, free, percent, 0, 0)
# malloc / heap functions
heap_info = cext.heap_info
heap_trim = cext.heap_trim
# =====================================================================
# --- disk
# =====================================================================
disk_io_counters = cext.disk_io_counters
def disk_usage(path):
"""Return disk usage associated with path."""
if isinstance(path, bytes):
# XXX: do we want to use "strict"? Probably yes, in order
# to fail immediately. After all we are accepting input here...
path = path.decode(ENCODING, errors="strict")
total, used, free = cext.disk_usage(path)
percent = usage_percent(used, total, round_=1)
return ntp.sdiskusage(total, used, free, percent)
def disk_partitions(all):
"""Return disk partitions."""
rawlist = cext.disk_partitions(all)
return [ntp.sdiskpart(*x) for x in rawlist]
# =====================================================================
# --- CPU
# =====================================================================
def cpu_times():
"""Return system CPU times as a named tuple."""
user, system, idle = cext.cpu_times()
# Internally, GetSystemTimes() is used, and it doesn't return
# interrupt and dpc times. cext.per_cpu_times() does, so we
# rely on it to get those only.
percpu_summed = ntp.scputimes(
*[sum(n) for n in zip(*cext.per_cpu_times())]
)
return ntp.scputimes(
user, system, idle, percpu_summed.interrupt, percpu_summed.dpc
)
def per_cpu_times():
"""Return system per-CPU times as a list of named tuples."""
ret = []
for user, system, idle, interrupt, dpc in cext.per_cpu_times():
item = ntp.scputimes(user, system, idle, interrupt, dpc)
ret.append(item)
return ret
def cpu_count_logical():
"""Return the number of logical CPUs in the system."""
return cext.cpu_count_logical()
def cpu_count_cores():
"""Return the number of CPU cores in the system."""
return cext.cpu_count_cores()
def cpu_stats():
"""Return CPU statistics."""
ctx_switches, interrupts, _dpcs, syscalls = cext.cpu_stats()
soft_interrupts = 0
return ntp.scpustats(ctx_switches, interrupts, soft_interrupts, syscalls)
def cpu_freq():
"""Return CPU frequency.
On Windows per-cpu frequency is not supported.
"""
curr, max_ = cext.cpu_freq()
min_ = 0.0
return [ntp.scpufreq(float(curr), min_, float(max_))]
_loadavg_initialized = False
_lock = threading.Lock()
def _getloadavg_impl():
# Drop to 2 decimal points which is what Linux does
raw_loads = cext.getloadavg()
return tuple(round(load, 2) for load in raw_loads)
def getloadavg():
"""Return the number of processes in the system run queue averaged
over the last 1, 5, and 15 minutes respectively as a tuple.
"""
global _loadavg_initialized
if _loadavg_initialized:
return _getloadavg_impl()
with _lock:
if not _loadavg_initialized:
cext.init_loadavg_counter()
_loadavg_initialized = True
return _getloadavg_impl()
# =====================================================================
# --- network
# =====================================================================
def net_connections(kind, _pid=-1):
"""Return socket connections. If pid == -1 return system-wide
connections (as opposed to connections opened by one process only).
"""
families, types = conn_tmap[kind]
rawlist = cext.net_connections(_pid, families, types)
ret = set()
for item in rawlist:
fd, fam, type, laddr, raddr, status, pid = item
nt = conn_to_ntuple(
fd,
fam,
type,
laddr,
raddr,
status,
TCP_STATUSES,
pid=pid if _pid == -1 else None,
)
ret.add(nt)
return list(ret)
def net_if_stats():
"""Get NIC stats (isup, duplex, speed, mtu)."""
ret = {}
rawdict = cext.net_if_stats()
for name, items in rawdict.items():
isup, duplex, speed, mtu = items
if hasattr(_common, 'NicDuplex'):
duplex = _common.NicDuplex(duplex)
ret[name] = ntp.snicstats(isup, duplex, speed, mtu, '')
return ret
def net_io_counters():
"""Return network I/O statistics for every network interface
installed on the system as a dict of raw tuples.
"""
return cext.net_io_counters()
def net_if_addrs():
"""Return the addresses associated to each NIC."""
return cext.net_if_addrs()
# =====================================================================
# --- sensors
# =====================================================================
def sensors_battery():
"""Return battery information."""
# For constants meaning see:
# https://msdn.microsoft.com/en-us/library/windows/desktop/
# aa373232(v=vs.85).aspx
acline_status, flags, percent, secsleft = cext.sensors_battery()
power_plugged = acline_status == 1
no_battery = bool(flags & 128)
charging = bool(flags & 8)
if no_battery:
return None
if power_plugged or charging:
secsleft = _common.POWER_TIME_UNLIMITED
elif secsleft == -1:
secsleft = _common.POWER_TIME_UNKNOWN
return ntp.sbattery(percent, secsleft, power_plugged)
# =====================================================================
# --- other system functions
# =====================================================================
_last_btime = 0
def boot_time():
"""The system boot time expressed in seconds since the epoch. This
also includes the time spent during hybernate / suspend.
"""
# This dirty hack is to adjust the precision of the returned
# value which may have a 1 second fluctuation, see:
# https://github.com/giampaolo/psutil/issues/1007
global _last_btime
ret = time.time() - cext.uptime()
if abs(ret - _last_btime) <= 1:
return _last_btime
else:
_last_btime = ret
return ret
def users():
"""Return currently connected users as a list of namedtuples."""
retlist = []
rawlist = cext.users()
for item in rawlist:
user, hostname, tstamp = item
nt = ntp.suser(user, None, hostname, tstamp, None)
retlist.append(nt)
return retlist
# =====================================================================
# --- Windows services
# =====================================================================
def win_service_iter():
"""Yields a list of WindowsService instances."""
for name, display_name in cext.winservice_enumerate():
yield WindowsService(name, display_name)
def win_service_get(name):
"""Open a Windows service and return it as a WindowsService instance."""
service = WindowsService(name, None)
service._display_name = service._query_config()['display_name']
return service
| IOPriority |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared/yaml_utils/source_position.py | {
"start": 6354,
"end": 6871
} | class ____(NamedTuple):
"""Represents a source position and key path within a file.
Args:
key_path (KeyPath): The path of keys that lead to the current object, where each element in
the path is either a string (for dict keys) or an integer (for list indices).
source_position (Optional[SourcePosition]): The source position of the object in the
document, if available.
"""
key_path: KeyPath
source_position: Optional[SourcePosition]
| SourcePositionAndKeyPath |
python | sympy__sympy | sympy/codegen/ast.py | {
"start": 50003,
"end": 50954
} | class ____(Token):
""" Represents a 'for-loop' in the code.
Expressions are of the form:
"while condition:
body..."
Parameters
==========
condition : expression convertible to Boolean
body : CodeBlock or iterable
When passed an iterable it is used to instantiate a CodeBlock.
Examples
========
>>> from sympy import symbols, Gt, Abs
>>> from sympy.codegen import aug_assign, Assignment, While
>>> x, dx = symbols('x dx')
>>> expr = 1 - x**2
>>> whl = While(Gt(Abs(dx), 1e-9), [
... Assignment(dx, -expr/expr.diff(x)),
... aug_assign(x, '+', dx)
... ])
"""
__slots__ = _fields = ('condition', 'body')
_construct_condition = staticmethod(lambda cond: _sympify(cond))
@classmethod
def _construct_body(cls, itr):
if isinstance(itr, CodeBlock):
return itr
else:
return CodeBlock(*itr)
| While |
python | encode__django-rest-framework | tests/test_model_serializer.py | {
"start": 18408,
"end": 19418
} | class ____(TestCase):
def test_json_field(self):
class JSONFieldModel(models.Model):
json_field = models.JSONField()
json_field_with_encoder = models.JSONField(encoder=DjangoJSONEncoder, decoder=CustomJSONDecoder)
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = JSONFieldModel
fields = ['json_field', 'json_field_with_encoder']
expected = dedent("""
TestSerializer():
json_field = JSONField(decoder=None, encoder=None, style={'base_template': 'textarea.html'})
json_field_with_encoder = JSONField(decoder=<class 'tests.test_model_serializer.CustomJSONDecoder'>, encoder=<class 'django.core.serializers.json.DjangoJSONEncoder'>, style={'base_template': 'textarea.html'})
""")
self.assertEqual(repr(TestSerializer()), expected)
# Tests for relational field mappings.
# ------------------------------------
| TestDjangoJSONFieldMapping |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/exception.py | {
"start": 998,
"end": 1112
} | class ____(TrainerError):
"""
Related to errors with the sampler actions.
"""
pass
| SamplerException |
python | run-llama__llama_index | llama-index-core/llama_index/core/chat_engine/types.py | {
"start": 1506,
"end": 3192
} | class ____:
"""Agent chat response."""
response: str = ""
sources: List[ToolOutput] = field(default_factory=list)
source_nodes: List[NodeWithScore] = field(default_factory=list)
is_dummy_stream: bool = False
metadata: Optional[Dict[str, Any]] = None
def set_source_nodes(self) -> None:
if self.sources and not self.source_nodes:
for tool_output in self.sources:
if isinstance(tool_output.raw_output, (Response, StreamingResponse)):
self.source_nodes.extend(tool_output.raw_output.source_nodes)
def __post_init__(self) -> None:
self.set_source_nodes()
def __str__(self) -> str:
return self.response
@property
def response_gen(self) -> Generator[str, None, None]:
"""Used for fake streaming, i.e. with tool outputs."""
if not self.is_dummy_stream:
raise ValueError(
"response_gen is only available for streaming responses. "
"Set is_dummy_stream=True if you still want a generator."
)
for token in self.response.split(" "):
yield token + " "
time.sleep(0.1)
async def async_response_gen(self) -> AsyncGenerator[str, None]:
"""Used for fake streaming, i.e. with tool outputs."""
if not self.is_dummy_stream:
raise ValueError(
"response_gen is only available for streaming responses. "
"Set is_dummy_stream=True if you still want a generator."
)
for token in self.response.split(" "):
yield token + " "
await asyncio.sleep(0.1)
@dataclass
| AgentChatResponse |
python | huggingface__transformers | tests/models/perceiver/test_image_processing_perceiver.py | {
"start": 1199,
"end": 3572
} | class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
num_images=1,
image_size=18,
min_resolution=30,
max_resolution=40,
do_center_crop=True,
crop_size=None,
do_resize=True,
size=None,
do_rescale=True,
rescale_factor=1 / 255,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
resample=PILImageResampling.BICUBIC,
):
self.crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
self.size = size if size is not None else {"height": 224, "width": 224}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.num_images = num_images
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_center_crop = do_center_crop
self.do_resize = do_resize
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict(self):
return {
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"resample": self.resample,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.size["height"], self.size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
| PerceiverImageProcessingTester |
python | matplotlib__matplotlib | lib/matplotlib/dates.py | {
"start": 59098,
"end": 59985
} | class ____(RRuleLocator):
"""
Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Parameters
----------
bysecond : int or list of int, default: all seconds
Ticks will be placed on every second in *bysecond*. Default is
``bysecond = range(60)``, i.e., every second.
interval : int, default: 1
The interval between each iteration. For example, if
``interval=2``, mark every second occurrence.
tz : str or `~datetime.tzinfo`, default: :rc:`timezone`
Ticks timezone. If a string, *tz* is passed to `dateutil.tz`.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
super().__init__(rule, tz=tz)
| SecondLocator |
python | docker__docker-py | tests/integration/api_container_test.py | {
"start": 57948,
"end": 59412
} | class ____(BaseAPIIntegrationTest):
def test_remove_link(self):
# Create containers
container1 = self.client.create_container(
TEST_IMG, 'cat', detach=True, stdin_open=True
)
container1_id = container1['Id']
self.tmp_containers.append(container1_id)
self.client.start(container1_id)
# Create Link
# we don't want the first /
link_path = self.client.inspect_container(container1_id)['Name'][1:]
link_alias = 'mylink'
container2 = self.client.create_container(
TEST_IMG, 'cat', host_config=self.client.create_host_config(
links={link_path: link_alias}
)
)
container2_id = container2['Id']
self.tmp_containers.append(container2_id)
self.client.start(container2_id)
# Remove link
linked_name = self.client.inspect_container(container2_id)['Name'][1:]
link_name = f'{linked_name}/{link_alias}'
self.client.remove_container(link_name, link=True)
# Link is gone
containers = self.client.containers(all=True)
retrieved = [x for x in containers if link_name in x['Names']]
assert len(retrieved) == 0
# Containers are still there
retrieved = [
x for x in containers if x['Id'].startswith(container1_id) or
x['Id'].startswith(container2_id)
]
assert len(retrieved) == 2
| LinkTest |
python | huggingface__transformers | tests/models/kosmos2/test_modeling_kosmos2.py | {
"start": 4026,
"end": 6556
} | class ____:
def __init__(
self,
parent,
batch_size=12,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=512,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
config = self.get_config()
return config, input_ids, input_mask
def get_config(self):
return Kosmos2TextConfig(
vocab_size=self.vocab_size,
embed_dim=self.hidden_size,
layers=self.num_hidden_layers,
attention_heads=self.num_attention_heads,
ffn_dim=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
| Kosmos2TextModelTester |
python | spack__spack | lib/spack/spack/vendor/jinja2/nodes.py | {
"start": 11095,
"end": 11311
} | class ____(Stmt):
"""If `test` is true, `body` is rendered, else `else_`."""
fields = ("test", "body", "elif_", "else_")
test: Node
body: t.List[Node]
elif_: t.List["If"]
else_: t.List[Node]
| If |
python | allegroai__clearml | clearml/backend_config/bucket_config.py | {
"start": 3326,
"end": 3945
} | class ____(object):
def __init__(self, buckets: Optional[List[Any]] = None, *_: Any, **__: Any) -> None:
self._buckets = buckets or []
self._prefixes = None
def _update_prefixes(self, refresh: bool = True) -> None:
if self._prefixes and not refresh:
return
prefixes = ((config, self._get_prefix_from_bucket_config(config)) for config in self._buckets)
self._prefixes = sorted(prefixes, key=itemgetter(1), reverse=True)
@abc.abstractmethod
def _get_prefix_from_bucket_config(self, config: "GSBucketConfig") -> str:
pass
| BaseBucketConfigurations |
python | ansible__ansible | lib/ansible/_internal/_json/_legacy_encoder.py | {
"start": 232,
"end": 1646
} | class ____(_legacy.Encoder):
"""Compatibility wrapper over `legacy` profile JSON encoder to support trust stripping and vault value plaintext conversion."""
def __init__(self, preprocess_unsafe: bool = False, vault_to_text: bool = False, _decode_bytes: bool = False, **kwargs) -> None:
self._preprocess_unsafe = preprocess_unsafe
self._vault_to_text = vault_to_text
self._decode_bytes = _decode_bytes
super().__init__(**kwargs)
def default(self, o: _t.Any) -> _t.Any:
"""Hooked default that can conditionally bypass base encoder behavior based on this instance's config."""
if type(o) is _profiles._WrappedValue: # pylint: disable=unidiomatic-typecheck
o = o.wrapped
if not self._preprocess_unsafe and type(o) is _legacy._Untrusted: # pylint: disable=unidiomatic-typecheck
return o.value # if not emitting unsafe markers, bypass custom unsafe serialization and just return the raw value
if self._vault_to_text and type(o) is _vault.EncryptedString: # pylint: disable=unidiomatic-typecheck
return str(o) # decrypt and return the plaintext (or fail trying)
if self._decode_bytes and isinstance(o, bytes):
return o.decode(errors='surrogateescape') # backward compatibility with `ansible.module_utils.basic.jsonify`
return super().default(o)
| LegacyControllerJSONEncoder |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/blobstore/api/main.py | {
"start": 1099,
"end": 1902
} | class ____(webapp2.RequestHandler):
def get(self):
# [START gae_blobstore_upload_url]
upload_url = blobstore.create_upload_url("/upload_photo")
# [END gae_blobstore_upload_url]
# [START gae_blobstore_upload_form]
# To upload files to the blobstore, the request method must be "POST"
# and enctype must be set to "multipart/form-data".
self.response.out.write(
"""
<html><body>
<form action="{0}" method="POST" enctype="multipart/form-data">
Upload File: <input type="file" name="file"><br>
<input type="submit" name="submit" value="Submit">
</form>
</body></html>""".format(
upload_url
)
)
# [END gae_blobstore_upload_form]
# [START gae_blobstore_upload_handler]
| PhotoUploadFormHandler |
python | sympy__sympy | sympy/codegen/cnodes.py | {
"start": 1768,
"end": 1888
} | class ____(Token):
""" Represents goto in C """
__slots__ = _fields = ('label',)
_construct_label = Label
| goto |
python | django__django | tests/template_tests/syntax_tests/i18n/test_underscore_syntax.py | {
"start": 3154,
"end": 4354
} | class ____(SimpleTestCase):
"""translation of constant strings"""
libraries = {"i18n": "django.templatetags.i18n"}
@setup({"i18n13": '{{ _("Password") }}'})
def test_i18n13(self):
with translation.override("de"):
output = self.engine.render_to_string("i18n13")
self.assertEqual(output, "Passwort")
@setup(
{
"i18n14": (
'{% cycle "foo" _("Password") _(\'Password\') as c %} {% cycle c %} '
"{% cycle c %}"
)
}
)
def test_i18n14(self):
with translation.override("de"):
output = self.engine.render_to_string("i18n14")
self.assertEqual(output, "foo Passwort Passwort")
@setup({"i18n15": '{{ absent|default:_("Password") }}'})
def test_i18n15(self):
with translation.override("de"):
output = self.engine.render_to_string("i18n15", {"absent": ""})
self.assertEqual(output, "Passwort")
@setup({"i18n16": '{{ _("<") }}'})
def test_i18n16(self):
with translation.override("de"):
output = self.engine.render_to_string("i18n16")
self.assertEqual(output, "<")
| I18nStringLiteralTests |
python | google__pytype | pytype/abstract/_instances.py | {
"start": 9102,
"end": 9407
} | class ____(BaseGenerator):
"""A representation of instances of async generators."""
def __init__(
self, async_generator_frame: "state.Frame", ctx: "context.Context"
) -> None:
super().__init__(
ctx.convert.async_generator_type, async_generator_frame, ctx, False
)
| AsyncGenerator |
python | walkccc__LeetCode | solutions/449. Serialize and Deserialize BST/449.py | {
"start": 0,
"end": 1060
} | class ____:
def serialize(self, root: TreeNode | None) -> str:
"""Encodes a tree to a single string."""
if not root:
return ''
chars = []
self._serialize(root, chars)
return ''.join(chars)
def deserialize(self, data: str) -> TreeNode | None:
"""Decodes your encoded data to tree."""
if not data:
return None
q = collections.deque(int(val) for val in data.split())
return self._deserialize(-math.inf, math.inf, q)
def _serialize(self, root: TreeNode | None, chars: list[str]) -> None:
if not root:
return
chars.append(str(root.val))
chars.append(' ')
self._serialize(root.left, chars)
self._serialize(root.right, chars)
def _deserialize(
self,
mn: int,
mx: int,
q: collections.deque[int]
) -> TreeNode | None:
if not q:
return None
val = q[0]
if val < mn or val > mx:
return None
q.popleft()
return TreeNode(val,
self._deserialize(mn, val, q),
self._deserialize(val, mx, q))
| Codec |
python | ray-project__ray | python/ray/data/tests/test_datasink.py | {
"start": 2976,
"end": 4452
} | class ____(Datasink[None]):
"""A writable datasource that logs node IDs of write tasks, for testing."""
def __init__(self, node_id: str):
self.num_ok = 0
self.num_failed = 0
self.node_id = node_id
self.num_rows_written = 0
def write(
self,
blocks: Iterable[Block],
ctx: TaskContext,
) -> None:
node_id = ray.get_runtime_context().get_node_id()
assert node_id == self.node_id
def on_write_complete(self, write_result: WriteResult[None]):
self.num_ok += 1
self.num_rows_written += write_result.num_rows
def on_write_failed(self, error: Exception) -> None:
self.num_failed += 1
def test_write_datasink_ray_remote_args(ray_start_cluster):
ray.shutdown()
cluster = ray_start_cluster
cluster.add_node(
resources={"foo": 100},
num_cpus=1,
)
bar_worker = cluster.add_node(resources={"bar": 100}, num_cpus=1)
bar_node_id = bar_worker.node_id
ray.init(cluster.address)
output = NodeLoggerOutputDatasink(bar_node_id)
ds = ray.data.range(100, override_num_blocks=10)
# Pin write tasks to node with "bar" resource.
ds.write_datasink(output, ray_remote_args={"resources": {"bar": 1}})
assert output.num_ok == 1
assert output.num_failed == 0
assert output.num_rows_written == 100
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| NodeLoggerOutputDatasink |
python | getsentry__sentry | src/sentry/integrations/discord/message_builder/base/embed/footer.py | {
"start": 212,
"end": 823
} | class ____:
def __init__(
self, text: str, icon_url: str | None = None, proxy_icon_url: str | None = None
) -> None:
self.text = text
self.icon_url = icon_url
self.proxy_icon_url = proxy_icon_url
def build(self) -> DiscordMessageEmbedFooterDict:
embed_footer = DiscordMessageEmbedFooterDict(text=self.text)
if self.icon_url is not None:
embed_footer["icon_url"] = self.icon_url
if self.proxy_icon_url is not None:
embed_footer["proxy_icon_url"] = self.proxy_icon_url
return embed_footer
| DiscordMessageEmbedFooter |
python | run-llama__llama_index | llama-index-core/llama_index/core/question_gen/types.py | {
"start": 362,
"end": 435
} | class ____(BaseModel):
sub_question: str
tool_name: str
| SubQuestion |
python | sqlalchemy__sqlalchemy | test/dialect/mssql/test_types.py | {
"start": 52878,
"end": 53597
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = mssql.dialect()
@testing.combinations(
("sa", sqltypes.Float(), "FLOAT"), # ideally it should render real
("sa", sqltypes.Double(), "DOUBLE PRECISION"),
("sa", sqltypes.FLOAT(), "FLOAT"),
("sa", sqltypes.REAL(), "REAL"),
("sa", sqltypes.DOUBLE(), "DOUBLE"),
("sa", sqltypes.DOUBLE_PRECISION(), "DOUBLE PRECISION"),
("mssql", mssql.FLOAT(), "FLOAT"),
("mssql", mssql.DOUBLE_PRECISION(), "DOUBLE PRECISION"),
("mssql", mssql.REAL(), "REAL"),
id_="ira",
)
def test_float_type_compile(self, type_, sql_text):
self.assert_compile(type_, sql_text)
| NumberTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-freshdesk/unit_tests/integration/config_builder.py | {
"start": 122,
"end": 821
} | class ____:
def __init__(self) -> None:
self._config = {
"api_key": "fake_api_key",
"domain": "any-domain.freshdesk.com",
"start_date": "2010-01-18T21:18:20Z",
}
def start_date(self, start_date: datetime) -> "ConfigBuilder":
self._config["start_date"] = start_date.strftime("%Y-%m-%dT%H:%M:%SZ")
return self
def api_key(self, api_key: str) -> "ConfigBuilder":
self._config["api_key"] = api_key
return self
def domain(self, domain: str) -> "ConfigBuilder":
self._config["domain"] = domain
return self
def build(self) -> Mapping[str, Any]:
return self._config
| ConfigBuilder |
python | wandb__wandb | wandb/apis/public/registries/_members.py | {
"start": 838,
"end": 1020
} | class ____(ArtifactsBase, arbitrary_types_allowed=True):
kind: Literal[MemberKind.USER] = MemberKind.USER
user: User
role: Union[MemberRole, str] # noqa: UP007
| UserMember |
python | numpy__numpy | numpy/lib/tests/test_function_base.py | {
"start": 7714,
"end": 8949
} | class ____:
def test_basic(self):
a = np.array([[1, 2], [3, 4]])
a_copy = np.copy(a)
assert_array_equal(a, a_copy)
a_copy[0, 0] = 10
assert_equal(a[0, 0], 1)
assert_equal(a_copy[0, 0], 10)
def test_order(self):
# It turns out that people rely on np.copy() preserving order by
# default; changing this broke scikit-learn:
# github.com/scikit-learn/scikit-learn/commit/7842748
a = np.array([[1, 2], [3, 4]])
assert_(a.flags.c_contiguous)
assert_(not a.flags.f_contiguous)
a_fort = np.array([[1, 2], [3, 4]], order="F")
assert_(not a_fort.flags.c_contiguous)
assert_(a_fort.flags.f_contiguous)
a_copy = np.copy(a)
assert_(a_copy.flags.c_contiguous)
assert_(not a_copy.flags.f_contiguous)
a_fort_copy = np.copy(a_fort)
assert_(not a_fort_copy.flags.c_contiguous)
assert_(a_fort_copy.flags.f_contiguous)
def test_subok(self):
mx = ma.ones(5)
assert_(not ma.isMaskedArray(np.copy(mx, subok=False)))
assert_(ma.isMaskedArray(np.copy(mx, subok=True)))
# Default behavior
assert_(not ma.isMaskedArray(np.copy(mx)))
| TestCopy |
python | apache__airflow | providers/alibaba/tests/unit/alibaba/cloud/sensors/test_oss_key.py | {
"start": 1610,
"end": 3264
} | class ____:
@mock.patch(f"{MODULE_NAME}.OSSHook")
def test_get_hook(self, mock_service, oss_key_sensor):
oss_key_sensor.hook
mock_service.assert_called_once_with(oss_conn_id=MOCK_OSS_CONN_ID, region=MOCK_REGION)
@mock.patch(f"{MODULE_NAME}.OSSKeySensor.hook", new_callable=PropertyMock)
def test_poke_exsiting_key(self, mock_service, oss_key_sensor):
# Given
mock_service.return_value.object_exists.return_value = True
# When
res = oss_key_sensor.poke(None)
# Then
assert res is True
mock_service.return_value.object_exists.assert_called_once_with(key=MOCK_KEY, bucket_name=MOCK_BUCKET)
@mock.patch(f"{MODULE_NAME}.OSSKeySensor.hook", new_callable=PropertyMock)
def test_poke_non_exsiting_key(self, mock_service, oss_key_sensor):
# Given
mock_service.return_value.object_exists.return_value = False
# When
res = oss_key_sensor.poke(None)
# Then
assert res is False
mock_service.return_value.object_exists.assert_called_once_with(key=MOCK_KEY, bucket_name=MOCK_BUCKET)
@mock.patch(f"{MODULE_NAME}.OSSKeySensor.hook", new_callable=PropertyMock)
def test_poke_without_bucket_name(
self,
mock_service,
oss_key_sensor,
):
# Given
oss_key_sensor.bucket_name = None
mock_service.return_value.object_exists.return_value = False
# When, Then
with pytest.raises(
AirflowException, match="If key is a relative path from root, please provide a bucket_name"
):
oss_key_sensor.poke(None)
| TestOSSKeySensor |
python | PyCQA__pylint | doc/data/messages/n/no-self-use/bad.py | {
"start": 0,
"end": 94
} | class ____:
def greeting(self): # [no-self-use]
print("Greetings pythonista!")
| Person |
python | google__pytype | pytype/overlays/typing_overlay.py | {
"start": 5170,
"end": 5743
} | class ____(abstract.AnnotationClass):
"""Implementation of typing.Final[T]."""
def _build_value(self, node, inner, ellipses):
self.ctx.errorlog.invalid_ellipses(self.ctx.vm.frames, ellipses, self.name)
if len(inner) != 1:
error = "typing.Final must wrap a single type"
self.ctx.errorlog.invalid_annotation(self.ctx.vm.frames, self, error)
return abstract.FinalAnnotation(inner[0], self.ctx)
def instantiate(self, node, container=None):
self.ctx.errorlog.invalid_final_type(self.ctx.vm.frames)
return self.ctx.new_unsolvable(node)
| Final |
python | weaviate__weaviate-python-client | weaviate/collections/classes/grpc.py | {
"start": 10716,
"end": 11557
} | class ____:
"""Factory class to use when defining near vector queries with multiple vectors in `near_vector()` and `hybrid()` methods."""
@staticmethod
def list_of_vectors(*vectors: V) -> _ListOfVectorsQuery[V]:
"""Define a many-vectors query to be used within a near vector search, i.e. multiple vectors over a single-vector space."""
if len(vectors) > 0 and len(vectors[0]) > 0:
try:
len(cast(Sequence[TwoDimensionalVectorType], vectors)[0][0])
dimensionality: Literal["1D", "2D"] = "2D"
except TypeError:
dimensionality = "1D"
return _ListOfVectorsQuery[V](dimensionality=dimensionality, vectors=vectors)
else:
raise WeaviateInvalidInputError(f"At least one vector must be given, got: {vectors}")
| NearVector |
python | numpy__numpy | numpy/polynomial/tests/test_chebyshev.py | {
"start": 16230,
"end": 16941
} | class ____:
def f(self, x):
return x * (x - 1) * (x - 2)
def test_raises(self):
assert_raises(ValueError, cheb.chebinterpolate, self.f, -1)
assert_raises(TypeError, cheb.chebinterpolate, self.f, 10.)
def test_dimensions(self):
for deg in range(1, 5):
assert_(cheb.chebinterpolate(self.f, deg).shape == (deg + 1,))
def test_approximation(self):
def powx(x, p):
return x**p
x = np.linspace(-1, 1, 10)
for deg in range(10):
for p in range(deg + 1):
c = cheb.chebinterpolate(powx, deg, (p,))
assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12)
| TestInterpolate |
python | oauthlib__oauthlib | tests/oauth2/rfc6749/test_tokens.py | {
"start": 255,
"end": 6411
} | class ____(TestCase):
# MAC without body/payload or extension
mac_plain = {
'token': 'h480djs93hd8',
'uri': 'http://example.com/resource/1?b=1&a=2',
'key': '489dks293j39',
'http_method': 'GET',
'nonce': '264095:dj83hs9s',
'hash_algorithm': 'hmac-sha-1'
}
auth_plain = {
'Authorization': 'MAC id="h480djs93hd8", nonce="264095:dj83hs9s",'
' mac="SLDJd4mg43cjQfElUs3Qub4L6xE="'
}
# MAC with body/payload, no extension
mac_body = {
'token': 'jd93dh9dh39D',
'uri': 'http://example.com/request',
'key': '8yfrufh348h',
'http_method': 'POST',
'nonce': '273156:di3hvdf8',
'hash_algorithm': 'hmac-sha-1',
'body': 'hello=world%21'
}
auth_body = {
'Authorization': 'MAC id="jd93dh9dh39D", nonce="273156:di3hvdf8",'
' bodyhash="k9kbtCIy0CkI3/FEfpS/oIDjk6k=", mac="W7bdMZbv9UWOTadASIQHagZyirA="'
}
# MAC with body/payload and extension
mac_both = {
'token': 'h480djs93hd8',
'uri': 'http://example.com/request?b5=%3D%253D&a3=a&c%40=&a2=r%20b&c2&a3=2+q',
'key': '489dks293j39',
'http_method': 'GET',
'nonce': '264095:7d8f3e4a',
'hash_algorithm': 'hmac-sha-1',
'body': 'Hello World!',
'ext': 'a,b,c'
}
auth_both = {
'Authorization': 'MAC id="h480djs93hd8", nonce="264095:7d8f3e4a",'
' bodyhash="Lve95gjOVATpfV8EL5X4nxwjKHE=", ext="a,b,c",'
' mac="Z3C2DojEopRDIC88/imW8Ez853g="'
}
# Bearer
token = 'vF9dft4qmT'
uri = 'http://server.example.com/resource'
bearer_headers = {
'Authorization': 'Bearer vF9dft4qmT'
}
valid_bearer_header_lowercase = {"Authorization": "bearer vF9dft4qmT"}
fake_bearer_headers = [
{'Authorization': 'Beaver vF9dft4qmT'},
{'Authorization': 'BeavervF9dft4qmT'},
{'Authorization': 'Beaver vF9dft4qmT'},
{'Authorization': 'BearerF9dft4qmT'},
{'Authorization': 'Bearer vF9d ft4qmT'},
]
valid_header_with_multiple_spaces = {'Authorization': 'Bearer vF9dft4qmT'}
bearer_body = 'access_token=vF9dft4qmT'
bearer_uri = 'http://server.example.com/resource?access_token=vF9dft4qmT'
def _mocked_validate_bearer_token(self, token, scopes, request):
if not token: # noqa: SIM103
return False
return True
def test_prepare_mac_header(self):
"""Verify mac signatures correctness
TODO: verify hmac-sha-256
"""
self.assertEqual(prepare_mac_header(**self.mac_plain), self.auth_plain)
self.assertEqual(prepare_mac_header(**self.mac_body), self.auth_body)
self.assertEqual(prepare_mac_header(**self.mac_both), self.auth_both)
def test_prepare_bearer_request(self):
"""Verify proper addition of bearer tokens to requests.
They may be represented as query components in body or URI or
in a Bearer authorization header.
"""
self.assertEqual(prepare_bearer_headers(self.token), self.bearer_headers)
self.assertEqual(prepare_bearer_body(self.token), self.bearer_body)
self.assertEqual(prepare_bearer_uri(self.token, uri=self.uri), self.bearer_uri)
def test_valid_bearer_is_validated(self):
request_validator = mock.MagicMock()
request_validator.validate_bearer_token = self._mocked_validate_bearer_token
request = Request("/", headers=self.bearer_headers)
result = BearerToken(request_validator=request_validator).validate_request(
request
)
self.assertTrue(result)
def test_lowercase_bearer_is_validated(self):
request_validator = mock.MagicMock()
request_validator.validate_bearer_token = self._mocked_validate_bearer_token
request = Request("/", headers=self.valid_bearer_header_lowercase)
result = BearerToken(request_validator=request_validator).validate_request(
request
)
self.assertTrue(result)
def test_fake_bearer_is_not_validated(self):
request_validator = mock.MagicMock()
request_validator.validate_bearer_token = self._mocked_validate_bearer_token
for fake_header in self.fake_bearer_headers:
request = Request("/", headers=fake_header)
result = BearerToken(request_validator=request_validator).validate_request(
request
)
self.assertFalse(result)
def test_header_with_multispaces_is_validated(self):
request_validator = mock.MagicMock()
request_validator.validate_bearer_token = self._mocked_validate_bearer_token
request = Request("/", headers=self.valid_header_with_multiple_spaces)
result = BearerToken(request_validator=request_validator).validate_request(
request
)
self.assertTrue(result)
def test_estimate_type(self):
request_validator = mock.MagicMock()
request_validator.validate_bearer_token = self._mocked_validate_bearer_token
request = Request("/", headers=self.bearer_headers)
result = BearerToken(request_validator=request_validator).estimate_type(request)
self.assertEqual(result, 9)
def test_estimate_type_with_fake_header_returns_type_0(self):
request_validator = mock.MagicMock()
request_validator.validate_bearer_token = self._mocked_validate_bearer_token
for fake_header in self.fake_bearer_headers:
request = Request("/", headers=fake_header)
result = BearerToken(request_validator=request_validator).estimate_type(
request
)
if (
fake_header["Authorization"].count(" ") == 2
and fake_header["Authorization"].split()[0] == "Bearer"
):
# If we're dealing with the header containing 2 spaces, it will be recognized
# as a Bearer valid header, the token itself will be invalid by the way.
self.assertEqual(result, 9)
else:
self.assertEqual(result, 0)
| TokenTest |
python | readthedocs__readthedocs.org | readthedocs/notifications/messages.py | {
"start": 830,
"end": 18727
} | class ____:
def __init__(self, id, header, body, type, icon_classes=None):
self.id = id
self.header = header
self.body = body
self.type = type # (ERROR, WARNING, INFO, NOTE, TIP)
self.icon_classes = icon_classes
self.format_values = {}
def __repr__(self):
return f"<Message: {self.id}>"
def __str__(self):
return f"Message: {self.id} | {self.header}"
def set_format_values(self, format_values):
self.format_values = format_values
def get_display_icon_classes(self):
if self.icon_classes:
return self.icon_classes
# Default classes that apply to all the notifications
classes = [
"fas",
]
if self.type == ERROR:
classes.append("fa-circle-xmark")
if self.type == WARNING:
classes.append("fa-circle-exclamation")
if self.type == INFO:
classes.append("fa-circle-info")
if self.type == NOTE:
classes.append("fa-circle-info")
if self.type == TIP:
classes.append("fa-circle-info")
return " ".join(classes)
def _prepend_template_prefix(self, template):
"""
Prepend Django {% load %} template tag.
This is required to render the notifications with custom filters/tags.
"""
prefix = "{% load notifications_filters %}"
return prefix + template
def get_rendered_header(self):
template = Template(self._prepend_template_prefix(self.header))
return template.render(context=Context(self.format_values))
def get_rendered_body(self):
template = Template(self._prepend_template_prefix(self.body))
return template.render(context=Context(self.format_values))
BUILD_MESSAGES = [
Message(
id=BuildAppError.GENERIC_WITH_BUILD_ID,
header=_("Unknown problem"),
# Note the message receives the instance it's attached to
# and could be use it to inject related data
body=_(
textwrap.dedent(
"""
There was a problem with Read the Docs while building your documentation.
Please try again later.
If this problem persists,
report this error to us with your build id ({{instance.pk}}).
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildAppError.UPLOAD_FAILED,
header=_("There was a problem while updating your documentation"),
body=_(
textwrap.dedent(
"""
Make sure this project is outputting files to the correct directory, or try again later.
If this problem persists, report this error to us with your build id ({{ instance.pk }}).
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildAppError.BUILD_TERMINATED_DUE_INACTIVITY,
header=_("Build terminated due to inactivity"),
body=_(
textwrap.dedent(
"""
This build was terminated due to inactivity.
If you continue to encounter this error,
file a support request and reference this build id ({{instance.pk}}).
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildUserError.GENERIC,
header=_("Unknown problem"),
body=_(
textwrap.dedent(
"""
We encountered a problem with a command while building your project.
To resolve this error, double check your project configuration and installed
dependencies are correct and have not changed recently.
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildMaxConcurrencyError.LIMIT_REACHED,
header=_("Maximum concurrency limit reached."),
body=_(
textwrap.dedent(
"""
Your project, organization, or user has reached its maximum number of concurrent builds allowed ({{limit}}).
This build will automatically retry in 5 minutes.
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildCancelled.CANCELLED_BY_USER,
header=_("Build cancelled manually."),
body=_(
textwrap.dedent(
"""
The user has cancelled this build.
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildCancelled.SKIPPED_EXIT_CODE_183,
header=_("Build skipped."),
body=_(
textwrap.dedent(
"""
This build was skipped because
one of the commands exited with code 183
"""
).strip(),
),
type=INFO,
),
Message(
id=BuildUserError.BUILD_TIME_OUT,
header=_("Build terminated due to time out."),
body=_(
textwrap.dedent(
"""
The build was terminated due to time out.
Read more about <a href="https://docs.readthedocs.io/en/stable/builds.html#build-resources">time and memory limits in our documentation</a>.
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildUserError.BUILD_EXCESSIVE_MEMORY,
header=_("Build terminated due to excessive memory consumption."),
body=_(
textwrap.dedent(
"""
This build was terminated due to excessive memory consumption.
Read more about <a href="https://docs.readthedocs.io/en/stable/builds.html#build-resources">time and memory limits in our documentation</a>.
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildUserError.VCS_DEPRECATED,
header=_("Build used a deprecated VCS is not supported: {{vcs}}."),
body=_(
textwrap.dedent(
"""
{{vcs}} VCS is not supported anymore.
Read more about this in our blog post <a href="https://about.readthedocs.com/blog/2024/02/drop-support-for-subversion-mercurial-bazaar/">Dropping support for Subversion, Mercurial, and Bazaar</a>.
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildUserError.SSH_KEY_WITH_WRITE_ACCESS,
header=_("Build aborted due to SSH key with write access."),
body=_(
textwrap.dedent(
"""
This build has failed because the current deploy key on the repository was created with write permission.
For protection against abuse we've restricted use of these deploy keys.
A read-only deploy key will need to be set up <b>before December 1st, 2025</b> to continue building this project.
Read more about this in our <a href="https://about.readthedocs.com/blog/2025/07/ssh-keys-with-write-access/">blog post</a>.
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildAppError.BUILD_DOCKER_UNKNOWN_ERROR,
header=_("Build terminated due to unknown error."),
body=_(
textwrap.dedent(
"""
This build was terminated due to unknown error: {{message}}
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildAppError.BUILDS_DISABLED,
header=_("Builds are temporary disabled for this project."),
body=_(
textwrap.dedent(
"""
This is due to excessive usage of our resources.
Please, contact our support team if you think this is a mistake
and builds should be re-enabled.
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildUserError.BUILD_COMMANDS_WITHOUT_OUTPUT,
header=_("No HTML content found"),
body=_(
textwrap.dedent(
"""
No content was output to the path "$READTHEDOCS_OUTPUT/html".
Read more about <a href="https://docs.readthedocs.io/page/build-customization.html#where-to-put-files">where to put your built files</a>.
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildUserError.BUILD_OUTPUT_IS_NOT_A_DIRECTORY,
header=_("Build output directory is not a directory"),
body=_(
textwrap.dedent(
"""
Build output directory for format "{{artifact_type}}" is not a directory.
Make sure you created this directory properly when running <code>build.commands</code>.
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildUserError.BUILD_OUTPUT_HAS_0_FILES,
header=_("Build output directory doesn't contain any file"),
body=_(
textwrap.dedent(
"""
Build output directory for format "{{artifact_type}}" does not contain any files.
It seems the build process created the directory but did not save any file to it.
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildUserError.BUILD_OUTPUT_HAS_MULTIPLE_FILES,
header=_("Build output directory contains multiple files"),
body=_(
textwrap.dedent(
"""
Build output directory for format "{{artifact_type}}" contains multiple files
and it is not currently supported.
Please, remove all the files but the "{{artifact_type}}" you want to upload.
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildUserError.BUILD_OUTPUT_HTML_NO_INDEX_FILE,
header=_("Index file is not present in HTML output directory"),
body=_(
textwrap.dedent(
"""
Your documentation did not generate an <code>index.html</code> at its root directory.
This is required for documentation serving at the root URL for this version.
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildUserError.BUILD_OUTPUT_OLD_DIRECTORY_USED,
header=_("Your project is outputing files in an old directory"),
body=_(
textwrap.dedent(
"""
Some files were detected in an unsupported output path: <code>_build/html</code>.
Ensure your project is configured to use the output path
<code>$READTHEDOCS_OUTPUT/html</code> instead.
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildUserError.NO_CONFIG_FILE_DEPRECATED,
header=_("Your project doesn't have a <code>.readthedocs.yaml</code> file"),
body=_(
textwrap.dedent(
"""
The configuration file required to build documentation is missing from your project.
Add a configuration file to your project to make it build successfully.
Read more in our <a href="https://docs.readthedocs.io/en/stable/config-file/v2.html">configuration file documentation</a>.
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildUserError.BUILD_IMAGE_CONFIG_KEY_DEPRECATED,
header=_("Configuration key <code>build.image</code> is deprecated"),
body=_(
textwrap.dedent(
"""
The configuration key <code>build.image</code> is deprecated.
Use <code>build.os</code> instead to continue building your project.
Read more in our <a href="https://docs.readthedocs.io/en/stable/config-file/v2.html#build-os">configuration file documentation</a>.
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildUserError.BUILD_OS_REQUIRED,
header=_("Configuration key <code>build.os</code> is required"),
body=_(
textwrap.dedent(
"""
The configuration key <code>build.os</code> is required to build your documentation.
Read more in our <a href="https://docs.readthedocs.io/en/stable/config-file/v2.html#build-os">configuration file documentation</a>.
"""
).strip(),
),
type=ERROR,
),
# TODO: consider exposing the name of the file exceeding the size limit.
Message(
id=BuildUserError.FILE_TOO_LARGE,
header=_("There is at least one file that exceeds the size limit"),
body=_(
textwrap.dedent(
"""
A file from your build process is too large to be processed by Read the Docs.
Please ensure no generated files are larger than 1GB.
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildUserError.BUILD_OUTPUT_HAS_NO_PDF_FILES,
header=_("There is no PDF file in output directory"),
body=_(
textwrap.dedent(
f"""
PDF file was not generated/found in "{BUILD_COMMANDS_OUTPUT_PATH_HTML}/pdf" output directory.
Make sure the PDF file is saved in this directory.
"""
).strip(),
),
type=ERROR,
),
Message(
id=BuildUserError.BUILD_COMMANDS_IN_BETA,
header=_("Config key <code>build.commands</code> is in beta"),
body=_(
textwrap.dedent(
"""
<strong>The <code>build.commands</code> feature is in beta, and could have backwards incompatible changes while in beta.</strong>
Read more at <a href="https://docs.readthedocs.io/page/build-customization.html#override-the-build-process">our documentation</a> to find out its limitations and potential issues.
"""
).strip(),
),
type=INFO,
),
Message(
id=BuildUserError.TEX_FILE_NOT_FOUND,
header=_("No TeX files were found"),
body=_(
textwrap.dedent(
"""
Read the Docs could not generate a PDF file because the intermediate step generating the TeX file failed.
"""
).strip(),
),
type=ERROR,
),
]
BUILD_MKDOCS_MESSAGES = [
Message(
id=MkDocsYAMLParseError.GENERIC_WITH_PARSE_EXCEPTION,
header=_(""),
body=_(
textwrap.dedent(
"""
Problem parsing MkDocs YAML configuration. {{exception}}
"""
).strip(),
),
type=ERROR,
),
Message(
id=MkDocsYAMLParseError.INVALID_DOCS_DIR_CONFIG,
header=_("MkDocs <code>docs_dir</code> configuration option is invalid"),
body=_(
textwrap.dedent(
"""
The <code>docs_dir</code> option from your <code>mkdocs.yml</code> configuration file has to be a
string with relative or absolute path.
"""
).strip(),
),
type=ERROR,
),
Message(
id=MkDocsYAMLParseError.INVALID_DOCS_DIR_PATH,
header=_("MkDocs <code>docs_dir</code> path not found"),
body=_(
textwrap.dedent(
"""
The path specified by <code>docs_dir</code> in the <code>mkdocs.yml</code> file does not exist.
Make sure this path is correct.
"""
).strip(),
),
type=ERROR,
),
Message(
id=MkDocsYAMLParseError.INVALID_EXTRA_CONFIG,
header=_("MkDocs <code>{{extra_config}}</code> configuration option is invalid"),
body=_(
textwrap.dedent(
"""
The <code>{{extra_config}}</code> option from your <code>mkdocs.yml</code> configuration file has to be a
list of relative paths.
"""
).strip(),
),
type=ERROR,
),
Message(
id=MkDocsYAMLParseError.EMPTY_CONFIG,
header=_("MkDocs configuration file is empty"),
body=_(
textwrap.dedent(
"""
Please make sure the <code>mkdocs.yml</code> configuration file is not empty.
"""
).strip(),
),
type=ERROR,
),
Message(
id=MkDocsYAMLParseError.NOT_FOUND,
header=_("MkDocs configuration file not found"),
body=_(
textwrap.dedent(
"""
The configuration file for MkDocs was not found.
Make sure the <code>mkdocs.configuration</code> option is correct,
and you have the <code>mkdocs.yml</code> in that location.
"""
).strip(),
),
type=ERROR,
),
Message(
id=MkDocsYAMLParseError.CONFIG_NOT_DICT,
header=_("Unknown error when loading your MkDocs configuration file"),
body=_(
textwrap.dedent(
"""
Your <code>mkdocs.yml</code> configuration file is incorrect.
Please follow the <a href="https://www.mkdocs.org/user-guide/configuration/">official user guide</a>
to configure the file properly.
"""
).strip(),
),
type=ERROR,
),
Message(
id=MkDocsYAMLParseError.SYNTAX_ERROR,
header=_("Syntax error in <code>mkdocs.yml</code>"),
body=_(
textwrap.dedent(
"""
Your <code>mkdocs.yml</code> could not be loaded,
possibly due to a syntax error.
"""
).strip(),
),
type=ERROR,
),
]
| Message |
python | pola-rs__polars | py-polars/src/polars/series/struct.py | {
"start": 726,
"end": 4030
} | class ____:
"""Series.struct namespace."""
_accessor = "struct"
def __init__(self, series: Series) -> None:
self._s: PySeries = series._s
def __getitem__(self, item: int | str) -> Series:
if isinstance(item, int):
return self.field(self.fields[item])
elif isinstance(item, str):
return self.field(item)
else:
msg = f"expected type 'int | str', got {qualified_type_name(item)!r}"
raise TypeError(msg)
def _ipython_key_completions_(self) -> list[str]:
return self.fields
@property
def fields(self) -> list[str]:
"""
Get the names of the fields.
Examples
--------
>>> s = pl.Series([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
>>> s.struct.fields
['a', 'b']
"""
if getattr(self, "_s", None) is None:
return []
return self._s.struct_fields()
def field(self, name: str) -> Series:
"""
Retrieve one of the fields of this `Struct` as a new Series.
Parameters
----------
name
Name of the field.
Examples
--------
>>> s = pl.Series([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
>>> s.struct.field("a")
shape: (2,)
Series: 'a' [i64]
[
1
3
]
"""
def rename_fields(self, names: Sequence[str]) -> Series:
"""
Rename the fields of the struct.
Parameters
----------
names
New names in the order of the struct's fields.
Examples
--------
>>> s = pl.Series([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
>>> s.struct.fields
['a', 'b']
>>> s = s.struct.rename_fields(["c", "d"])
>>> s.struct.fields
['c', 'd']
"""
@property
def schema(self) -> Schema:
"""
Get the struct definition as a name/dtype schema dict.
Examples
--------
>>> s = pl.Series([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
>>> s.struct.schema
Schema({'a': Int64, 'b': Int64})
"""
if getattr(self, "_s", None) is None:
return Schema({})
schema = self._s.dtype().to_schema()
return Schema(schema, check_dtypes=False)
def unnest(self) -> DataFrame:
"""
Convert this struct Series to a DataFrame with a separate column for each field.
Examples
--------
>>> s = pl.Series([{"a": 1, "b": 2}, {"a": 3, "b": 4}])
>>> s.struct.unnest()
shape: (2, 2)
┌─────┬─────┐
│ a ┆ b │
│ --- ┆ --- │
│ i64 ┆ i64 │
╞═════╪═════╡
│ 1 ┆ 2 │
│ 3 ┆ 4 │
└─────┴─────┘
"""
return wrap_df(self._s.struct_unnest())
def json_encode(self) -> Series:
"""
Convert this struct to a string column with json values.
Examples
--------
>>> s = pl.Series("a", [{"a": [1, 2], "b": [45]}, {"a": [9, 1, 3], "b": None}])
>>> s.struct.json_encode()
shape: (2,)
Series: 'a' [str]
[
"{"a":[1,2],"b":[45]}"
"{"a":[9,1,3],"b":null}"
]
"""
| StructNameSpace |
python | Netflix__metaflow | test/core/metaflow_test/cli_check.py | {
"start": 416,
"end": 8312
} | class ____(MetaflowCheck):
def run_cli(self, args):
cmd = [sys.executable, "test_flow.py"]
# remove --quiet from top level options to capture output from echo
# we will add --quiet in args if needed
cmd.extend([opt for opt in self.cli_options if opt != "--quiet"])
cmd.extend(args)
return subprocess.run(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True
)
def assert_artifact(self, step, name, value, fields=None):
for task, artifacts in self.artifact_dict(step, name).items():
if name in artifacts:
artifact = artifacts[name]
if fields:
for field, v in fields.items():
if is_stringish(artifact):
data = json.loads(artifact)
elif isinstance(artifact, IncludedFile):
data = json.loads(artifact.descriptor)
else:
data = artifact
if not isinstance(data, dict):
raise AssertArtifactFailed(
"Task '%s' expected %s to be a dictionary (got %s)"
% (task, name, type(data))
)
if data.get(field, None) != v:
raise AssertArtifactFailed(
"Task '%s' expected %s[%s]=%r but got %s[%s]=%s"
% (
task,
name,
field,
truncate(value),
name,
field,
truncate(data[field]),
)
)
elif artifact != value:
raise AssertArtifactFailed(
"Task '%s' expected %s=%r but got %s=%s"
% (task, name, truncate(value), name, truncate(artifact))
)
else:
raise AssertArtifactFailed(
"Task '%s' expected %s=%s but "
"the key was not found" % (task, name, truncate(value))
)
return True
def artifact_dict(self, step, name):
with NamedTemporaryFile(dir=".") as tmp:
cmd = [
"dump",
"--max-value-size",
"100000000000",
"--private",
"--include",
name,
"--file",
tmp.name,
"%s/%s" % (self.run_id, step),
]
self.run_cli(cmd)
with open(tmp.name, "rb") as f:
# if the step had multiple tasks, this will fail
return pickle.load(f)
def artifact_dict_if_exists(self, step, name):
return self.artifact_dict(step, name)
def assert_log(self, step, logtype, value, exact_match=True):
log = self.get_log(step, logtype)
if (exact_match and log != value) or (not exact_match and value not in log):
raise AssertLogFailed(
"Task '%s/%s' expected %s log '%s' but got '%s'"
% (self.run_id, step, logtype, repr(value), repr(log))
)
return True
def assert_card(
self,
step,
task,
card_type,
value,
card_hash=None,
card_id=None,
exact_match=True,
):
from metaflow.plugins.cards.exception import CardNotPresentException
no_card_found_message = CardNotPresentException.headline
try:
card_data = self.get_card(
step, task, card_type, card_hash=card_hash, card_id=card_id
)
except subprocess.CalledProcessError as e:
if no_card_found_message in e.stderr.decode("utf-8").strip():
card_data = None
else:
raise e
if (exact_match and card_data != value) or (
not exact_match and value not in card_data
):
raise AssertCardFailed(
"Task '%s/%s' expected %s card with content '%s' but got '%s'"
% (self.run_id, step, card_type, repr(value), repr(card_data))
)
return True
def list_cards(self, step, task, card_type=None):
from metaflow.plugins.cards.exception import CardNotPresentException
no_card_found_message = CardNotPresentException.headline
try:
card_data = self._list_cards(step, task=task, card_type=card_type)
except subprocess.CalledProcessError as e:
if no_card_found_message in e.stderr.decode("utf-8").strip():
card_data = None
else:
raise e
return card_data
def _list_cards(self, step, task=None, card_type=None):
with NamedTemporaryFile(dir=".") as f:
pathspec = "%s/%s" % (self.run_id, step)
if task is not None:
pathspec = "%s/%s/%s" % (self.run_id, step, task)
cmd = ["--quiet", "card", "list", pathspec, "--as-json", "--file", f.name]
if card_type is not None:
cmd.extend(["--type", card_type])
self.run_cli(cmd)
with open(f.name, "r") as jsf:
return json.load(jsf)
def get_card(self, step, task, card_type, card_hash=None, card_id=None):
with NamedTemporaryFile(dir=".") as f:
cmd = [
"--quiet",
"card",
"get",
"%s/%s/%s" % (self.run_id, step, task),
f.name,
"--type",
card_type,
]
if card_hash is not None:
cmd.extend(["--hash", card_hash])
if card_id is not None:
cmd.extend(["--id", card_id])
self.run_cli(cmd)
with open(f.name, "r") as jsf:
return jsf.read()
def get_log(self, step, logtype):
cmd = ["--quiet", "logs", "--%s" % logtype, "%s/%s" % (self.run_id, step)]
completed_process = self.run_cli(cmd)
return completed_process.stdout.decode("utf-8")
def get_user_tags(self):
completed_process = self.run_cli(
["tag", "list", "--flat", "--hide-system-tags", "--run-id", self.run_id]
)
lines = completed_process.stderr.decode("utf-8").splitlines()[1:]
return frozenset(lines)
def get_system_tags(self):
completed_process = self.run_cli(
["tag", "list", "--flat", "--run-id", self.run_id]
)
lines = completed_process.stderr.decode("utf-8").splitlines()[1:]
return frozenset(lines) - self.get_user_tags()
def add_tag(self, tag):
self.run_cli(["tag", "add", "--run-id", self.run_id, tag])
def add_tags(self, tags):
self.run_cli(["tag", "add", "--run-id", self.run_id, *tags])
def remove_tag(self, tag):
self.run_cli(["tag", "remove", "--run-id", self.run_id, tag])
def remove_tags(self, tags):
self.run_cli(["tag", "remove", "--run-id", self.run_id, *tags])
def replace_tag(self, tag_to_remove, tag_to_add):
self.run_cli(
["tag", "replace", "--run-id", self.run_id, tag_to_remove, tag_to_add]
)
def replace_tags(self, tags_to_remove, tags_to_add):
cmd = ["tag", "replace", "--run-id", self.run_id]
for tag_to_remove in tags_to_remove:
cmd.extend(["--remove", tag_to_remove])
for tag_to_add in tags_to_add:
cmd.extend(["--add", tag_to_add])
self.run_cli(cmd)
| CliCheck |
python | ipython__ipython | IPython/core/magics/execution.py | {
"start": 4398,
"end": 5411
} | class ____(timeit.Timer):
"""Timer class that explicitly uses self.inner
which is an undocumented implementation detail of CPython,
not shared by PyPy.
"""
# Timer.timeit copied from CPython 3.4.2
def timeit(self, number=timeit.default_number):
"""Time 'number' executions of the main statement.
To be precise, this executes the setup statement once, and
then returns the time it takes to execute the main statement
a number of times, as a float measured in seconds. The
argument is the number of times through the loop, defaulting
to one million. The main statement, the setup statement and
the timer function to be used are passed to the constructor.
"""
it = itertools.repeat(None, number)
gcold = gc.isenabled()
gc.disable()
try:
timing = self.inner(it, self.timer)
finally:
if gcold:
gc.enable()
return timing
@magics_class
| Timer |
python | bokeh__bokeh | src/bokeh/server/views/session_handler.py | {
"start": 1955,
"end": 6480
} | class ____(AuthRequestHandler):
''' Implements a custom Tornado handler for document display page
'''
application: BokehTornado
request: HTTPServerRequest
application_context: ApplicationContext
bokeh_websocket_path: str
def __init__(self, tornado_app: BokehTornado, *args, **kw) -> None:
self.application_context = kw['application_context']
self.bokeh_websocket_path = kw['bokeh_websocket_path']
# Note: tornado_app is stored as self.application
super().__init__(tornado_app, *args, **kw)
def initialize(self, *args, **kw):
pass
@authenticated
async def get_session(self) -> ServerSession:
app = self.application
token = self.get_argument("bokeh-token", default=None)
session_id: ID | None = self.get_argument("bokeh-session-id", default=None)
if 'Bokeh-Session-Id' in self.request.headers:
if session_id is not None:
log.debug("Server received session ID in request argument and header, expected only one")
raise HTTPError(status_code=403, reason="session ID was provided as an argument and header")
session_id = self.request.headers.get('Bokeh-Session-Id')
if token is not None:
if session_id is not None:
log.debug("Server received both token and session ID, expected only one")
raise HTTPError(status_code=403, reason="Both token and session ID were provided")
session_id = get_session_id(token)
elif session_id is None:
if app.generate_session_ids:
session_id = generate_session_id(secret_key=app.secret_key,
signed=app.sign_sessions)
else:
log.debug("Server configured not to generate session IDs and none was provided")
raise HTTPError(status_code=403, reason="No bokeh-session-id provided")
if token is None:
if app.include_headers is None:
excluded_headers = (app.exclude_headers or [])
allowed_headers = [header for header in self.request.headers
if header not in excluded_headers]
else:
allowed_headers = app.include_headers
headers = {k: v for k, v in self.request.headers.items()
if k in allowed_headers}
if app.include_cookies is None:
excluded_cookies = (app.exclude_cookies or [])
allowed_cookies = [cookie for cookie in self.request.cookies
if cookie not in excluded_cookies]
else:
allowed_cookies = app.include_cookies
cookies = {k: v.value for k, v in self.request.cookies.items()
if k in allowed_cookies}
if cookies and 'Cookie' in headers and 'Cookie' not in (app.include_headers or []):
# Do not include Cookie header since cookies can be restored from cookies dict
del headers['Cookie']
arguments = {} if self.request.arguments is None else self.request.arguments
payload = {'headers': headers, 'cookies': cookies, 'arguments': arguments}
payload.update(self.application_context.application.process_request(self.request))
token = generate_jwt_token(session_id,
secret_key=app.secret_key,
signed=app.sign_sessions,
expiration=app.session_token_expiration,
extra_payload=payload)
if not check_token_signature(token,
secret_key=app.secret_key,
signed=app.sign_sessions):
log.error("Session id had invalid signature: %r", session_id)
raise HTTPError(status_code=403, reason="Invalid token or session ID")
session = await self.application_context.create_session_if_needed(session_id, self.request, token)
return session
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| SessionHandler |
python | mlflow__mlflow | mlflow/protos/unity_catalog_prompt_service_pb2_grpc.py | {
"start": 19975,
"end": 35387
} | class ____(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def CreatePrompt(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/mlflow.unitycatalog.UnityCatalogPromptService/CreatePrompt',
unity__catalog__prompt__messages__pb2.CreatePromptRequest.SerializeToString,
unity__catalog__prompt__messages__pb2.CreatePromptResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def UpdatePrompt(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/mlflow.unitycatalog.UnityCatalogPromptService/UpdatePrompt',
unity__catalog__prompt__messages__pb2.UpdatePromptRequest.SerializeToString,
unity__catalog__prompt__messages__pb2.UpdatePromptResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def DeletePrompt(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/mlflow.unitycatalog.UnityCatalogPromptService/DeletePrompt',
unity__catalog__prompt__messages__pb2.DeletePromptRequest.SerializeToString,
unity__catalog__prompt__messages__pb2.DeletePromptResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def GetPrompt(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/mlflow.unitycatalog.UnityCatalogPromptService/GetPrompt',
unity__catalog__prompt__messages__pb2.GetPromptRequest.SerializeToString,
unity__catalog__prompt__messages__pb2.GetPromptResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def SearchPrompts(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/mlflow.unitycatalog.UnityCatalogPromptService/SearchPrompts',
unity__catalog__prompt__messages__pb2.SearchPromptsRequest.SerializeToString,
unity__catalog__prompt__messages__pb2.SearchPromptsResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def CreatePromptVersion(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/mlflow.unitycatalog.UnityCatalogPromptService/CreatePromptVersion',
unity__catalog__prompt__messages__pb2.CreatePromptVersionRequest.SerializeToString,
unity__catalog__prompt__messages__pb2.CreatePromptVersionResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def UpdatePromptVersion(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/mlflow.unitycatalog.UnityCatalogPromptService/UpdatePromptVersion',
unity__catalog__prompt__messages__pb2.UpdatePromptVersionRequest.SerializeToString,
unity__catalog__prompt__messages__pb2.UpdatePromptVersionResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def DeletePromptVersion(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/mlflow.unitycatalog.UnityCatalogPromptService/DeletePromptVersion',
unity__catalog__prompt__messages__pb2.DeletePromptVersionRequest.SerializeToString,
unity__catalog__prompt__messages__pb2.DeletePromptVersionResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def GetPromptVersion(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/mlflow.unitycatalog.UnityCatalogPromptService/GetPromptVersion',
unity__catalog__prompt__messages__pb2.GetPromptVersionRequest.SerializeToString,
unity__catalog__prompt__messages__pb2.GetPromptVersionResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def SearchPromptVersions(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/mlflow.unitycatalog.UnityCatalogPromptService/SearchPromptVersions',
unity__catalog__prompt__messages__pb2.SearchPromptVersionsRequest.SerializeToString,
unity__catalog__prompt__messages__pb2.SearchPromptVersionsResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def SetPromptAlias(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/mlflow.unitycatalog.UnityCatalogPromptService/SetPromptAlias',
unity__catalog__prompt__messages__pb2.SetPromptAliasRequest.SerializeToString,
unity__catalog__prompt__messages__pb2.SetPromptAliasResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def DeletePromptAlias(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/mlflow.unitycatalog.UnityCatalogPromptService/DeletePromptAlias',
unity__catalog__prompt__messages__pb2.DeletePromptAliasRequest.SerializeToString,
unity__catalog__prompt__messages__pb2.DeletePromptAliasResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def GetPromptVersionByAlias(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/mlflow.unitycatalog.UnityCatalogPromptService/GetPromptVersionByAlias',
unity__catalog__prompt__messages__pb2.GetPromptVersionByAliasRequest.SerializeToString,
unity__catalog__prompt__messages__pb2.GetPromptVersionByAliasResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def SetPromptTag(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/mlflow.unitycatalog.UnityCatalogPromptService/SetPromptTag',
unity__catalog__prompt__messages__pb2.SetPromptTagRequest.SerializeToString,
unity__catalog__prompt__messages__pb2.SetPromptTagResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def DeletePromptTag(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/mlflow.unitycatalog.UnityCatalogPromptService/DeletePromptTag',
unity__catalog__prompt__messages__pb2.DeletePromptTagRequest.SerializeToString,
unity__catalog__prompt__messages__pb2.DeletePromptTagResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def SetPromptVersionTag(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/mlflow.unitycatalog.UnityCatalogPromptService/SetPromptVersionTag',
unity__catalog__prompt__messages__pb2.SetPromptVersionTagRequest.SerializeToString,
unity__catalog__prompt__messages__pb2.SetPromptVersionTagResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
@staticmethod
def DeletePromptVersionTag(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(
request,
target,
'/mlflow.unitycatalog.UnityCatalogPromptService/DeletePromptVersionTag',
unity__catalog__prompt__messages__pb2.DeletePromptVersionTagRequest.SerializeToString,
unity__catalog__prompt__messages__pb2.DeletePromptVersionTagResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
_registered_method=True)
| UnityCatalogPromptService |
python | astropy__astropy | astropy/coordinates/spectral_coordinate.py | {
"start": 526,
"end": 582
} | class ____(AstropyUserWarning):
pass
| NoVelocityWarning |
python | mlflow__mlflow | mlflow/server/graphql/graphql_schema_extensions.py | {
"start": 772,
"end": 875
} | class ____(graphene.ObjectType):
output = graphene.String(description="Echoes the input string")
| Test |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-stripe/unit_tests/integration/test_external_account_cards.py | {
"start": 3917,
"end": 8387
} | class ____(TestCase):
@HttpMocker()
def test_given_one_page_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_external_accounts_request().with_object(_OBJECT).with_limit(100).build(),
_external_accounts_card_response().with_record(_an_external_account_card()).with_record(_an_external_account_card()).build(),
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 2
@HttpMocker()
def test_given_many_pages_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_external_accounts_request().with_object(_OBJECT).with_limit(100).build(),
_external_accounts_card_response()
.with_pagination()
.with_record(_an_external_account_card().with_id("last_record_id_from_first_page"))
.build(),
)
http_mocker.get(
_external_accounts_request().with_starting_after("last_record_id_from_first_page").with_object(_OBJECT).with_limit(100).build(),
_external_accounts_card_response().with_record(_an_external_account_card()).with_record(_an_external_account_card()).build(),
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 3
@HttpMocker()
def test_when_read_then_add_cursor_field(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_external_accounts_request().with_object(_OBJECT).with_limit(100).build(),
_external_accounts_card_response().with_record(_an_external_account_card()).build(),
)
output = self._read(_config().with_start_date(_A_START_DATE).with_lookback_window_in_days(10))
assert output.records[0].record.data["updated"] == int(_NOW.timestamp())
@HttpMocker()
def test_given_http_status_400_when_read_then_stream_did_not_run(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_external_accounts_request().with_any_query_params().build(),
a_response_with_status(400),
)
output = self._read(_config())
assert_stream_did_not_run(output, _STREAM_NAME, "Your account is not set up to use Issuing")
@HttpMocker()
def test_given_http_status_401_when_read_then_config_error(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_external_accounts_request().with_any_query_params().build(),
a_response_with_status(401),
)
output = self._read(_config(), expecting_exception=True)
assert output.errors[-1].trace.error.failure_type == FailureType.config_error
@HttpMocker()
def test_given_rate_limited_when_read_then_retry_and_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_external_accounts_request().with_any_query_params().build(),
[
a_response_with_status(429),
_external_accounts_card_response().with_record(_an_external_account_card()).build(),
],
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 1
@HttpMocker()
def test_given_http_status_500_once_before_200_when_read_then_retry_and_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_external_accounts_request().with_any_query_params().build(),
[a_response_with_status(500), _external_accounts_card_response().with_record(_an_external_account_card()).build()],
)
output = self._read(_config())
assert len(output.records) == 1
@HttpMocker()
def test_given_http_status_500_when_read_then_raise_config_error(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_external_accounts_request().with_any_query_params().build(),
a_response_with_status(500),
)
with patch.object(HttpStatusErrorHandler, "max_retries", new=0):
output = self._read(_config(), expecting_exception=True)
assert output.errors[-1].trace.error.failure_type == FailureType.config_error
def _read(self, config: ConfigBuilder, expecting_exception: bool = False) -> EntrypointOutput:
return _read(config, SyncMode.full_refresh, expecting_exception=expecting_exception)
@freezegun.freeze_time(_NOW.isoformat())
| FullRefreshTest |
python | skorch-dev__skorch | skorch/tests/callbacks/test_logging.py | {
"start": 32866,
"end": 39907
} | class ____:
@pytest.fixture
def net_cls(self):
from skorch import NeuralNetClassifier
return NeuralNetClassifier
@pytest.fixture
def logger_cls(self):
from skorch.callbacks import MlflowLogger
return MlflowLogger
@pytest.fixture
def data(self, classifier_data):
X, y = classifier_data
X, y = X[:40], y[:40]
return X, y
@pytest.fixture
def mock_run(self):
from mlflow.entities import Run
return Mock(Run)
@pytest.fixture
def mock_client(self):
from mlflow.tracking import MlflowClient
return Mock(MlflowClient)
@pytest.fixture
def logger_mock_cls(self, logger_cls, mock_run, mock_client):
return partial(logger_cls, mock_run, mock_client)
@pytest.fixture
def net_builder_cls(self, net_cls, classifier_module, data):
def builder(*args, **kwargs):
return net_cls(classifier_module, *args, **kwargs).fit(*data)
return builder
@pytest.fixture
def net_fitted(self, logger_mock_cls, net_builder_cls):
return net_builder_cls(callbacks=[logger_mock_cls()], max_epochs=3)
def test_run_default(self, monkeypatch, logger_cls, mock_run, mock_client):
import mlflow
mock_active_run = Mock(mlflow.active_run, return_value=mock_run)
monkeypatch.setattr(mlflow, 'active_run', mock_active_run)
logger = logger_cls(client=mock_client).initialize()
assert mock_active_run.called
assert logger.run_ == mock_run
def test_client_default(self, monkeypatch, logger_cls, mock_run, mock_client):
import mlflow.tracking
monkeypatch.setattr(mlflow.tracking, 'MlflowClient', mock_client)
logger = logger_cls(run=mock_run).initialize()
assert mock_client.called
assert logger.client_ == mock_client()
def test_keys_from_history_logged(self, net_fitted, mock_client):
assert mock_client.log_metric.call_count == 3 * 4
keys = {call_args[0][1] for call_args in mock_client.log_metric.call_args_list}
expected = {'dur', 'train_loss', 'valid_loss', 'valid_acc'}
assert keys == expected
def test_ignore_keys(self, logger_mock_cls, net_builder_cls):
# ignore 'dur' and 'valid_loss', 'unknown' doesn't exist but
# this should not cause a problem
logger = logger_mock_cls(keys_ignored=['dur', 'valid_loss', 'unknown'])
net_builder_cls(callbacks=[logger], max_epochs=3)
keys = {
call_args[0][1]
for call_args in logger.client_.log_metric.call_args_list
}
expected = {'train_loss', 'valid_acc'}
assert keys == expected
def test_keys_ignored_is_string(self, logger_mock_cls):
logger = logger_mock_cls(keys_ignored='a-key').initialize()
expected = {'a-key', 'batches'}
assert logger.keys_ignored_ == expected
@pytest.mark.parametrize(
'log_on_batch_end, log_on_epoch_end, batch_suffix, epoch_suffix',
[(False, False, '', ''),
(True, False, '', ''),
(False, True, '', ''),
(True, True, '_batch', '_epoch')]
)
def test_epoch_batch_suffixes_defaults(
self,
logger_mock_cls,
log_on_batch_end,
log_on_epoch_end,
batch_suffix,
epoch_suffix,
):
logger = logger_mock_cls(
log_on_batch_end=log_on_batch_end,
log_on_epoch_end=log_on_epoch_end
).initialize()
assert logger.batch_suffix_ == batch_suffix
assert logger.epoch_suffix_ == epoch_suffix
@pytest.mark.parametrize('batch_suffix', ['', '_foo'])
@pytest.mark.parametrize('epoch_suffix', ['', '_bar'])
def test_epoch_batch_custom_suffix(
self,
logger_mock_cls,
batch_suffix,
epoch_suffix,
):
logger = logger_mock_cls(
log_on_batch_end=True,
log_on_epoch_end=True,
batch_suffix=batch_suffix,
epoch_suffix=epoch_suffix,
).initialize()
assert logger.batch_suffix_ == batch_suffix
assert logger.epoch_suffix_ == epoch_suffix
def test_dont_log_epoch_metrics(self, logger_mock_cls, net_builder_cls):
logger = logger_mock_cls(
log_on_batch_end=True,
log_on_epoch_end=False,
batch_suffix='_batch',
epoch_suffix='_epoch',
)
net_builder_cls(batch_size=10, callbacks=[logger], max_epochs=3)
assert all(
call[0][1].endswith('_batch')
for call in logger.client_.log_metric.call_args_list
)
def test_log_epochs_with_step(self, net_fitted, mock_client):
expected = [x for x in range(1, 4) for _ in range(4)]
actual = [call[1].get('step') for call in mock_client.log_metric.call_args_list]
assert expected == actual
def test_log_batch_with_step(self, logger_mock_cls, net_builder_cls):
logger = logger_mock_cls(log_on_batch_end=True, log_on_epoch_end=False)
net_builder_cls(batch_size=10, callbacks=[logger], max_epochs=4)
expected = [x for x in range(1, 21) for _ in range(2)]
actual = [
call[1].get('step')
for call in logger.client_.log_metric.call_args_list
]
assert expected == actual
def test_artifact_filenames(self, net_fitted, mock_client):
keys = {call_args[0][1].name
for call_args in mock_client.log_artifact.call_args_list}
expected = {'params.pth', 'optimizer.pth', 'criterion.pth', 'history.json'}
assert keys == expected
def test_artifact_in_temporary_directory(self, net_fitted, mock_client):
for call_args in mock_client.log_artifact.call_args_list:
assert str(call_args[0][1]).startswith('/tmp')
def test_dont_create_artifact(self, net_builder_cls, logger_mock_cls):
logger = logger_mock_cls(create_artifact=False)
net_builder_cls(callbacks=[logger], max_epochs=3)
assert not logger.client_.log_artifact.called
def test_run_terminated_automatically(self, net_fitted, mock_client):
assert mock_client.set_terminated.call_count == 1
def test_run_not_closed(self, logger_mock_cls, mock_client, net_builder_cls):
logger = logger_mock_cls(terminate_after_train=False)
net_builder_cls(callbacks=[logger], max_epochs=2)
assert logger.client_.set_terminated.call_count == 0
def test_fit_with_real_run_and_client(self, tmp_path, logger_cls, net_builder_cls):
from mlflow.tracking import MlflowClient
client = MlflowClient(tracking_uri=tmp_path.as_uri())
experiment_name = 'foo'
experiment_id = client.create_experiment(experiment_name)
run = client.create_run(experiment_id)
logger = logger_cls(run, client, create_artifact=False)
net_builder_cls(callbacks=[logger], max_epochs=3)
assert os.listdir(tmp_path)
| TestMLflowLogger |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 91142,
"end": 92803
} | class ____(nn.Module):
def __init__(self, qconfig):
super().__init__()
self.conv1 = nn.Conv2d(3, 2, 1, bias=None).to(dtype=torch.float)
self.bn1 = nn.BatchNorm2d(2).to(dtype=torch.float)
self.relu1 = nn.ReLU(inplace=True).to(dtype=torch.float)
self.sub1 = SubModelForFusion()
self.sub2 = SubModelWithoutFusion()
self.fc = nn.Linear(36, 10).to(dtype=torch.float)
self.quant = QuantStub()
self.dequant = DeQuantStub()
self.qconfig = qconfig
self.conv2 = nn.Conv3d(3, 2, (1, 1, 1), bias=None).to(dtype=torch.float)
self.relu2 = nn.ReLU(inplace=False).to(dtype=torch.float)
self.bn2 = nn.BatchNorm3d(2).to(dtype=torch.float)
self.relu3 = nn.ReLU(inplace=True).to(dtype=torch.float)
self.conv3 = nn.Conv1d(3, 3, 2).to(dtype=torch.float)
self.bn3 = nn.BatchNorm1d(3).to(dtype=torch.float)
self.relu4 = nn.ReLU(inplace=True).to(dtype=torch.float)
# don't quantize sub2
self.sub2.qconfig = None
self.fc.qconfig = None
def forward(self, x):
x = x.squeeze(2)
x = self.quant(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu4(x)
x = x.unsqueeze(2)
y = x.unsqueeze(2)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.sub1(x)
x = self.dequant(x)
x = self.sub2(x)
x = x.reshape(-1, 36).contiguous()
x = self.fc(x)
y = self.conv2(y)
y = self.relu2(y)
y = self.bn2(y)
y = self.relu3(y)
y = self.dequant(y)
return x
| ModelForFusion |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 220095,
"end": 221014
} | class ____(Operation):
def call(self, x1, x2):
return backend.numpy.power(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
output_dtype = dtypes.result_type(
getattr(x1, "dtype", type(x1)), getattr(x2, "dtype", type(x2))
)
return KerasTensor(output_shape, dtype=output_dtype)
@keras_export(["keras.ops.power", "keras.ops.numpy.power"])
def power(x1, x2):
"""First tensor elements raised to powers from second tensor, element-wise.
Args:
x1: The bases.
x2: The exponents.
Returns:
Output tensor, the bases in `x1` raised to the exponents in `x2`.
"""
if any_symbolic_tensors((x1, x2)):
return Power().symbolic_call(x1, x2)
return backend.numpy.power(x1, x2)
| Power |
python | django__django | django/contrib/auth/forms.py | {
"start": 1683,
"end": 1944
} | class ____(forms.Field):
widget = ReadOnlyPasswordHashWidget
def __init__(self, *args, **kwargs):
kwargs.setdefault("required", False)
kwargs.setdefault("disabled", True)
super().__init__(*args, **kwargs)
| ReadOnlyPasswordHashField |
python | django-import-export__django-import-export | tests/core/tests/admin_integration/test_import_functionality.py | {
"start": 22469,
"end": 24254
} | class ____(AdminTestMixin, TestCase):
"""Handle custom column name import (issue 1822)."""
fixtures = ["author"]
def setUp(self):
super().setUp()
EBookResource._meta.fields = ("id", "author_email", "name", "published_date")
def tearDown(self):
super().tearDown()
EBookResource._meta.fields = ("id", "author_email", "name", "published")
def test_import_preview_order(self):
author_id = Author.objects.first().id
response = self._do_import_post(
self.ebook_import_url,
"ebooks.csv",
input_format="0",
data={"author": author_id},
)
# test header rendered in correct order
target_header_re = (
r"<thead>[\\n\s]+"
r"<tr>[\\n\s]+"
r"<th></th>[\\n\s]+"
r"<th>id</th>[\\n\s]+"
r"<th>Email of the author</th>[\\n\s]+"
r"<th>name</th>[\\n\s]+"
r"<th>published_date</th>[\\n\s]+"
r"<th>Author Name</th>[\\n\s]+"
r"</tr>[\\n\s]+"
"</thead>"
)
self.assertRegex(response.content.decode(), target_header_re)
# test row rendered in correct order
target_row_re = (
r'<tr class="new">[\\n\s]+'
r'<td class="import-type">[\\n\s]+New[\\n\s]+</td>[\\n\s]+'
r'<td><ins style="background:#e6ffe6;">1</ins></td>[\\n\s]+'
r'<td><ins style="background:#e6ffe6;">test@example.com</ins></td>[\\n\s]+'
r'<td><ins style="background:#e6ffe6;">Some book</ins></td>[\\n\s]+'
r"<td></td>[\\n\s]+"
r"<td></td>[\\n\s]+"
"</tr>"
)
self.assertRegex(response.content.decode(), target_row_re)
| CustomColumnNameImportTest |
python | celery__celery | celery/apps/multi.py | {
"start": 1557,
"end": 3218
} | class ____:
def __init__(self, args):
self.args = args
self.options = OrderedDict()
self.values = []
self.passthrough = ''
self.namespaces = defaultdict(lambda: OrderedDict())
def parse(self):
rargs = [arg for arg in self.args if arg]
pos = 0
while pos < len(rargs):
arg = rargs[pos]
if arg == '--':
self.passthrough = ' '.join(rargs[pos:])
break
elif arg[0] == '-':
if arg[1] == '-':
self.process_long_opt(arg[2:])
else:
value = None
if len(rargs) > pos + 1 and rargs[pos + 1][0] != '-':
value = rargs[pos + 1]
pos += 1
self.process_short_opt(arg[1:], value)
else:
self.values.append(arg)
pos += 1
def process_long_opt(self, arg, value=None):
if '=' in arg:
arg, value = arg.split('=', 1)
self.add_option(arg, value, short=False)
def process_short_opt(self, arg, value=None):
self.add_option(arg, value, short=True)
def optmerge(self, ns, defaults=None):
if defaults is None:
defaults = self.options
return OrderedDict(defaults, **self.namespaces[ns])
def add_option(self, name, value, short=False, ns=None):
prefix = short and '-' or '--'
dest = self.options
if ':' in name:
name, ns = name.split(':')
dest = self.namespaces[ns]
dest[prefix + name] = value
| NamespacedOptionParser |
python | lazyprogrammer__machine_learning_examples | ab_testing/ucb1_starter.py | {
"start": 630,
"end": 2113
} | class ____:
def __init__(self, p):
# p: the win rate
self.p = p
self.p_estimate = 0.
self.N = 0. # num samples collected so far
def pull(self):
# draw a 1 with probability p
return np.random.random() < self.p
def update(self, x):
self.N += 1.
self.p_estimate = ((self.N - 1)*self.p_estimate + x) / self.N
def ucb(mean, n, nj):
return # TODO
def run_experiment():
bandits = [Bandit(p) for p in BANDIT_PROBABILITIES]
rewards = np.empty(NUM_TRIALS)
total_plays = 0
# initialization: play each bandit once
for j in range(len(bandits)):
x = bandits[j].pull()
total_plays += 1
bandits[j].update(x)
for i in range(NUM_TRIALS):
j = # TODO
x = bandits[j].pull()
total_plays += 1
bandits[j].update(x)
# for the plot
rewards[i] = x
cumulative_average = np.cumsum(rewards) / (np.arange(NUM_TRIALS) + 1)
# plot moving average ctr
plt.plot(cumulative_average)
plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES))
plt.xscale('log')
plt.show()
# plot moving average ctr linear
plt.plot(cumulative_average)
plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES))
plt.show()
for b in bandits:
print(b.p_estimate)
print("total reward earned:", rewards.sum())
print("overall win rate:", rewards.sum() / NUM_TRIALS)
print("num times selected each bandit:", [b.N for b in bandits])
return cumulative_average
if __name__ == '__main__':
run_experiment()
| Bandit |
python | mlflow__mlflow | mlflow/gateway/config.py | {
"start": 2039,
"end": 2248
} | class ____(ConfigModel):
cohere_api_key: str
@field_validator("cohere_api_key", mode="before")
def validate_cohere_api_key(cls, value):
return _resolve_api_key_from_input(value)
| CohereConfig |
python | doocs__leetcode | solution/2500-2599/2593.Find Score of an Array After Marking All Elements/Solution.py | {
"start": 0,
"end": 477
} | class ____:
def findScore(self, nums: List[int]) -> int:
n = len(nums)
vis = [False] * n
q = [(x, i) for i, x in enumerate(nums)]
heapify(q)
ans = 0
while q:
x, i = heappop(q)
ans += x
vis[i] = True
for j in (i - 1, i + 1):
if 0 <= j < n:
vis[j] = True
while q and vis[q[0][1]]:
heappop(q)
return ans
| Solution |
python | apache__airflow | providers/common/sql/src/airflow/providers/common/sql/hooks/sql.py | {
"start": 4519,
"end": 5026
} | class ____(Protocol):
"""Database connection protocol."""
def connect(self, host: str, port: int, username: str, schema: str) -> Any:
"""
Connect to a database.
:param host: The database host to connect to.
:param port: The database port to connect to.
:param username: The database username used for the authentication.
:param schema: The database schema to connect to.
:return: the authorized connection object.
"""
| ConnectorProtocol |
python | yaml__pyyaml | lib/yaml/tokens.py | {
"start": 2303,
"end": 2573
} | class ____(Token):
id = '<scalar>'
def __init__(self, value, plain, start_mark, end_mark, style=None):
self.value = value
self.plain = plain
self.start_mark = start_mark
self.end_mark = end_mark
self.style = style
| ScalarToken |
python | jina-ai__jina | jina/jaml/__init__.py | {
"start": 17916,
"end": 31274
} | class ____(metaclass=JAMLCompatibleType):
""":class:`JAMLCompatible` is a mixin class designed to be used with multiple inheritance.
It will add :meth:`to_yaml` and :meth:`from_yaml` to the target class,
making that class JAML-friendly.
.. warning::
For the sake of cooperative multiple inheritance, do NOT implement :meth:`__init__` for this class
"""
_version = '' #: YAML version number, this will be later overridden if YAML config says the other way
@classmethod
def _to_yaml(cls, representer, data):
"""
A low-level interface required by :mod:`pyyaml` write interface.
.. warning::
This function should not be used directly, please use :meth:`save_config`.
:param representer: the class that will serialize
:param data: the data to serialize
:return: the node's representation
"""
from jina.jaml.parsers import get_parser
config_dict = get_parser(cls, version=data._version).dump(data)
config_dict_with_jtype = {
'jtype': cls.__name__
} # specifies the type of Jina object that is represented
config_dict_with_jtype.update(config_dict)
# To maintain compatibility with off-the-shelf parsers we don't want any tags ('!...') to show up in the output
# Since pyyaml insists on receiving a tag, we need to pass the default map tag. This won't show up in the output
return representer.represent_mapping(
representer.DEFAULT_MAPPING_TAG, config_dict_with_jtype
)
@classmethod
def _from_yaml(cls, constructor: FullConstructor, node):
"""A low-level interface required by :mod:`pyyaml` load interface.
.. warning::
This function should not be used directly, please use :meth:`load_config`.
:param constructor: the class that will construct
:param node: the node to traverse
:return: the parser associated with the class
"""
data = constructor.construct_mapping(node, deep=True)
from jina.jaml.parsers import get_parser
return get_parser(cls, version=data.get('version', None)).parse(
cls, data, runtime_args=constructor.runtime_args
)
def save_config(self, filename: Optional[str] = None):
"""
Save the object's config into a YAML file.
:param filename: file path of the yaml file, if not given then :attr:`config_abspath` is used
"""
f = filename or getattr(self, 'config_abspath', None)
if not f:
f = tempfile.NamedTemporaryFile(
'w',
delete=False,
).name
warnings.warn(
f'no "filename" is given, {self!r}\'s config will be saved to: {f}'
)
with open(f, 'w', encoding='utf-8') as fp:
JAML.dump(self, fp)
@classmethod
def load_config(
cls,
source: Union[str, TextIO, Dict],
*,
allow_py_modules: bool = True,
substitute: bool = True,
context: Optional[Dict[str, Any]] = None,
uses_with: Optional[Dict] = None,
uses_metas: Optional[Dict] = None,
uses_requests: Optional[Dict] = None,
extra_search_paths: Optional[List[str]] = None,
py_modules: Optional[str] = None,
runtime_args: Optional[Dict[str, Any]] = None,
uses_dynamic_batching: Optional[Dict] = None,
needs: Optional[Set[str]] = None,
include_gateway: bool = True,
noblock_on_start: bool = False,
**kwargs,
) -> 'JAMLCompatible':
"""A high-level interface for loading configuration with features
of loading extra py_modules, substitute env & context variables. Any class that
implements :class:`JAMLCompatible` mixin can enjoy this feature, e.g. :class:`BaseFlow`,
:class:`BaseExecutor`, :class:`BaseGateway` and all their subclasses.
Support substitutions in YAML:
- Environment variables: ``${{ ENV.VAR }}`` (recommended), ``$VAR`` (deprecated).
- Context dict (``context``): ``${{ CONTEXT.VAR }}``(recommended), ``${{ VAR }}``.
- Internal reference via ``this`` and ``root``: ``${{this.same_level_key}}``, ``${{root.root_level_key}}``
Substitutions are carried in the order and multiple passes to resolve variables with best effort.
.. highlight:: yaml
.. code-block:: yaml
!BaseEncoder
metas:
name: ${{VAR_A}} # env or context variables
workspace: my-${{this.name}} # internal reference
.. highlight:: python
.. code-block:: python
# load Executor from yaml file
BaseExecutor.load_config('a.yml')
# load Executor from yaml file and substitute environment variables
os.environ['VAR_A'] = 'hello-world'
b = BaseExecutor.load_config('a.yml')
assert b.name == 'hello-world'
# load Executor from yaml file and substitute variables from a dict
b = BaseExecutor.load_config('a.yml', context={'VAR_A': 'hello-world'})
assert b.name == 'hello-world'
# disable substitute
b = BaseExecutor.load_config('a.yml', substitute=False)
.. # noqa: DAR401
:param source: the multi-kind source of the configs.
:param allow_py_modules: allow importing plugins specified by ``py_modules`` in YAML at any levels
:param substitute: substitute environment, internal reference and context variables.
:param context: context replacement variables in a dict, the value of the dict is the replacement.
:param uses_with: dictionary of parameters to overwrite from the default config's with field
:param uses_metas: dictionary of parameters to overwrite from the default config's metas field
:param uses_requests: dictionary of parameters to overwrite from the default config's requests field
:param extra_search_paths: extra paths used when looking for executor yaml files
:param py_modules: Optional py_module from which the object need to be loaded
:param runtime_args: Optional dictionary of parameters runtime_args to be directly passed without being parsed into a yaml config
:param uses_dynamic_batching: dictionary of parameters to overwrite from the default config's dynamic_batching field
:param needs: the name of the Deployment(s) that this Deployment receives data from. One can also use "gateway" to indicate the connection with the gateway.
:param include_gateway: Defines if the gateway deployment should be included, defaults to True
:param noblock_on_start: If set, starting a Pod/Deployment does not block the thread/process. It then relies on '
'`wait_start_success` at outer function for the postpone check.
:param kwargs: kwargs for parse_config_source
:return: :class:`JAMLCompatible` object
"""
if runtime_args:
kwargs['runtimes_args'] = (
dict()
) # when we have runtime args it is needed to have an empty runtime args session in the yam config
if py_modules:
kwargs['runtimes_args']['py_modules'] = py_modules
if isinstance(source, str) and os.path.exists(source):
extra_search_paths = (extra_search_paths or []) + [os.path.dirname(source)]
stream, s_path = parse_config_source(
source, extra_search_paths=extra_search_paths, **kwargs
)
with stream as fp:
# first load yml with no tag
no_tag_yml = JAML.load_no_tags(fp)
if no_tag_yml:
no_tag_yml.update(**kwargs)
# if there is `override_with` u should make sure that `uses_with` does not remain in the yaml
def _delitem(
obj,
key,
):
value = obj.get(key, None)
if value:
del obj[key]
return
for k, v in obj.items():
if isinstance(v, dict):
_delitem(v, key)
if uses_with is not None:
_delitem(no_tag_yml, key='uses_with')
if uses_metas is not None:
_delitem(no_tag_yml, key='uses_metas')
if uses_requests is not None:
_delitem(no_tag_yml, key='uses_requests')
if uses_dynamic_batching is not None:
_delitem(no_tag_yml, key='uses_dynamic_batching')
cls._override_yml_params(no_tag_yml, 'with', uses_with)
cls._override_yml_params(no_tag_yml, 'metas', uses_metas)
cls._override_yml_params(no_tag_yml, 'requests', uses_requests)
cls._override_yml_params(
no_tag_yml, 'dynamic_batching', uses_dynamic_batching
)
else:
raise BadConfigSource(
f'can not construct {cls} from an empty {source}. nothing to read from there'
)
if substitute:
# expand variables
no_tag_yml = JAML.expand_dict(no_tag_yml, context)
if allow_py_modules:
_extra_search_paths = extra_search_paths or []
load_py_modules(
no_tag_yml,
extra_search_paths=(
(_extra_search_paths + [os.path.dirname(s_path)])
if s_path
else _extra_search_paths
),
)
from jina.enums import DeploymentRoleType
from jina.orchestrate.deployments import Deployment
from jina.orchestrate.flow.base import Flow
if issubclass(cls, Flow):
no_tag_yml_copy = copy.copy(no_tag_yml)
# only needed for Flow
if no_tag_yml_copy.get('with') is None:
no_tag_yml_copy['with'] = {}
no_tag_yml_copy['with']['extra_search_paths'] = (
no_tag_yml_copy['with'].get('extra_search_paths') or []
) + (extra_search_paths or [])
if cls.is_valid_jaml(no_tag_yml_copy):
no_tag_yml = no_tag_yml_copy
tag_yml = JAML.unescape(
JAML.dump(no_tag_yml),
include_unknown_tags=False,
jtype_whitelist=('Flow',),
)
elif issubclass(cls, Deployment):
no_tag_yml['with']['extra_search_paths'] = (
no_tag_yml['with'].get('extra_search_paths') or []
) + (extra_search_paths or [])
no_tag_yml['with']['include_gateway'] = (
no_tag_yml['with'].get('include_gateway') or include_gateway
)
no_tag_yml['with']['noblock_on_start'] = noblock_on_start
no_tag_yml['with']['deployment_role'] = DeploymentRoleType.DEPLOYMENT
if needs:
no_tag_yml['needs'] = list(needs)
tag_yml = JAML.unescape(
JAML.dump(no_tag_yml),
include_unknown_tags=False,
jtype_whitelist=('Deployment',),
)
else:
# revert yaml's tag and load again, this time with substitution
tag_yml = JAML.unescape(JAML.dump(no_tag_yml))
# load into object, no more substitute
obj = JAML.load(tag_yml, substitute=False, runtime_args=runtime_args)
if not isinstance(obj, cls):
raise BadConfigSource(
f'Can not construct {cls} object from {source}. Source might be an invalid configuration.'
)
if type(source) == str:
obj._config_loaded = source
return obj
@classmethod
def _override_yml_params(cls, raw_yaml, field_name, override_field):
if override_field:
field_params = raw_yaml.get(field_name, {})
field_params.update(**override_field)
raw_yaml[field_name] = field_params
@staticmethod
def is_valid_jaml(obj: Dict) -> bool:
"""
Verifies the yaml syntax of a given object by first serializing it and attempting to deserialize and catch
parser errors
:param obj: yaml object
:return: whether the syntax is valid or not
"""
serialized_yaml = JAML.unescape(
JAML.dump(obj),
include_unknown_tags=False,
)
try:
yaml.safe_load(serialized_yaml)
# we only need to validate syntax, e.g, need to detect parser errors
except yaml.parser.ParserError:
return False
except yaml.error.YAMLError:
return True
return True
def _add_runtime_args(self, _runtime_args: Optional[Dict]):
    """Expose ``_runtime_args`` as an attribute-accessible namespace.

    Builds ``self.runtime_args`` from the given mapping; an empty or
    ``None`` mapping yields an empty namespace.
    """
    self.runtime_args = SimpleNamespace(**(_runtime_args or {}))
| JAMLCompatible |
python | dagster-io__dagster | python_modules/automation/automation/printer.py | {
"start": 208,
"end": 1518
class ____(IndentingPrinter):
    """Subclass of IndentingPrinter wrapping a StringIO."""

    # Backing buffer that collects everything the printer emits.
    buffer: StringIO

    def __init__(self, indent_level: int = 4, current_indent: int = 0):
        self.buffer = StringIO()
        # Route every printed line into the buffer (newline-terminated)
        # instead of stdout.
        self.printer: Callable[[str], Any] = lambda x: self.buffer.write(x + "\n")
        super().__init__(
            indent_level=indent_level, printer=self.printer, current_indent=current_indent
        )

    def __enter__(self) -> "IndentingBufferPrinter":
        return self

    def __exit__(
        self,
        _exception_type: type[BaseException],
        _exception_value: BaseException,
        _traceback: list[str],
    ) -> None:
        # NOTE: closing the buffer means read() is only valid *inside* the
        # with-block — StringIO.getvalue() raises ValueError after close().
        self.buffer.close()

    def read(self) -> str:
        """Get the value of the backing StringIO."""
        return self.buffer.getvalue()

    def write_header(self) -> None:
        """Write the "auto-generated file" banner, reconstructing the CLI invocation."""
        args = [os.path.basename(sys.argv[0])] + sys.argv[1:]
        self.line('"""NOTE: THIS FILE IS AUTO-GENERATED. DO NOT EDIT.')
        self.blank_line()
        self.line("@generated")
        self.blank_line()
        self.line("Produced via:")
        # One "token \" line per CLI argument, omitting --snapshot-update so
        # the reproduced command regenerates (not re-snapshots) the file.
        self.line("\n\t".join(f"{s} \\" for s in args if s != "--snapshot-update"))
        self.blank_line()
        self.line('"""')
        self.blank_line()
        self.blank_line()
| IndentingBufferPrinter |
python | sympy__sympy | sympy/physics/continuum_mechanics/cable.py | {
"start": 393,
"end": 31421
class ____:
    """
    Cables are structures in engineering that support
    the applied transverse loads through the tensile
    resistance developed in its members.

    Cables are widely used in suspension bridges, tension
    leg offshore platforms, transmission lines, and find
    use in several other engineering applications.

    Examples
    ========
    A cable is supported at (0, 10) and (10, 10). Two point loads
    acting vertically downwards act on the cable, one with magnitude 3 kN
    and acting 2 meters from the left support and 3 meters below it, while
    the other with magnitude 2 kN is 6 meters from the left support and
    6 meters below it.

    >>> from sympy.physics.continuum_mechanics.cable import Cable
    >>> c = Cable(('A', 0, 10), ('B', 10, 10))
    >>> c.apply_load(-1, ('P', 2, 7, 3, 270))
    >>> c.apply_load(-1, ('Q', 6, 4, 2, 270))
    >>> c.loads
    {'distributed': {}, 'point_load': {'P': [3, 270], 'Q': [2, 270]}}
    >>> c.loads_position
    {'P': [2, 7], 'Q': [6, 4]}
    """
    def __init__(self, support_1, support_2):
        """
        Initializes the class.

        Parameters
        ==========

        support_1 and support_2 are tuples of the form
        (label, x, y), where

        label : String or symbol
            The label of the support

        x : Sympifyable
            The x coordinate of the position of the support

        y : Sympifyable
            The y coordinate of the position of the support
        """
        # Support bookkeeping: [x, y] per side, labels ordered left-to-right.
        self._left_support = []
        self._right_support = []
        self._supports = {}
        self._support_labels = []
        # Loads are kept by kind; a cable holds either point loads or a single
        # distributed load, never both (enforced in apply_load).
        self._loads = {"distributed": {}, "point_load": {}}
        self._loads_position = {}
        self._length = 0
        self._reaction_loads = {}
        self._tension = {}
        # Lowest point of the cable; only meaningful for distributed loads
        # (set by solve()).
        self._lowest_x_global = sympify(0)
        self._lowest_y_global = sympify(0)
        self._cable_eqn = None
        self._tension_func = None

        if support_1[0] == support_2[0]:
            raise ValueError("Supports can not have the same label")
        elif support_1[1] == support_2[1]:
            raise ValueError("Supports can not be at the same location")

        x1 = sympify(support_1[1])
        y1 = sympify(support_1[2])
        self._supports[support_1[0]] = [x1, y1]

        x2 = sympify(support_2[1])
        y2 = sympify(support_2[2])
        self._supports[support_2[0]] = [x2, y2]

        # Order the supports so index 0 is always the leftmost one.
        if support_1[1] < support_2[1]:
            self._left_support.append(x1)
            self._left_support.append(y1)
            self._right_support.append(x2)
            self._right_support.append(y2)
            self._support_labels.append(support_1[0])
            self._support_labels.append(support_2[0])
        else:
            self._left_support.append(x2)
            self._left_support.append(y2)
            self._right_support.append(x1)
            self._right_support.append(y1)
            self._support_labels.append(support_2[0])
            self._support_labels.append(support_1[0])

        # Reaction forces start at zero; solve() fills them in.
        for i in self._support_labels:
            self._reaction_loads[Symbol("R_"+ i +"_x")] = 0
            self._reaction_loads[Symbol("R_"+ i +"_y")] = 0
@property
def supports(self):
    """
    Returns the supports of the cable along with their
    positions.
    """
    return self._supports

@property
def left_support(self):
    """
    Returns the position of the left support.
    """
    return self._left_support

@property
def right_support(self):
    """
    Returns the position of the right support.
    """
    return self._right_support

@property
def loads(self):
    """
    Returns the magnitude and direction of the loads
    acting on the cable.
    """
    return self._loads

@property
def loads_position(self):
    """
    Returns the position of the point loads acting on the
    cable.
    """
    return self._loads_position

@property
def length(self):
    """
    Returns the length of the cable.
    """
    return self._length

@property
def reaction_loads(self):
    """
    Returns the reaction forces at the supports, which are
    initialized to 0.
    """
    return self._reaction_loads

@property
def tension(self):
    """
    Returns the tension developed in the cable due to the loads
    applied.
    """
    # Populated by solve(); empty until then.
    return self._tension
def tension_at(self, x):
    """
    Returns the tension at a given value of x developed due to
    distributed load.
    """
    # Only available once solve() has processed a distributed load.
    if 'distributed' not in self._tension:
        raise ValueError("No distributed load added or solve method not called")
    if x > self._right_support[0] or x < self._left_support[0]:
        raise ValueError("The value of x should be between the two supports")
    # The stored expression is in X, measured from the cable's lowest point;
    # shift the requested abscissa accordingly before substituting.
    tension_expr = self._tension['distributed']
    shifted = x - self._lowest_x_global
    return tension_expr.subs({Symbol('X'): shifted})
def apply_length(self, length):
    """
    This method specifies the length of the cable

    Parameters
    ==========

    length : Sympifyable
        The length of the cable

    Examples
    ========

    >>> from sympy.physics.continuum_mechanics.cable import Cable
    >>> c = Cable(('A', 0, 10), ('B', 10, 10))
    >>> c.apply_length(20)
    >>> c.length
    20
    """
    # Straight-line (Euclidean) distance between the supports — the cable can
    # never be shorter than this. The previous code *subtracted* the squared
    # vertical span (dx**2 - dy**2), which understates the distance and can
    # even produce a complex number for steep cables; it must be added.
    dist = ((self._left_support[0] - self._right_support[0])**2
            + (self._left_support[1] - self._right_support[1])**2)**(1/2)

    if length < dist:
        raise ValueError("length should not be less than the distance between the supports")

    self._length = length
def change_support(self, label, new_support):
    """
    This method changes the mentioned support with a new support.

    Parameters
    ==========
    label: String or symbol
        The label of the support to be changed

    new_support: Tuple of the form (new_label, x, y)
        new_label: String or symbol
            The label of the new support

        x: Sympifyable
            The x-coordinate of the position of the new support.

        y: Sympifyable
            The y-coordinate of the position of the new support.

    Examples
    ========

    >>> from sympy.physics.continuum_mechanics.cable import Cable
    >>> c = Cable(('A', 0, 10), ('B', 10, 10))
    >>> c.supports
    {'A': [0, 10], 'B': [10, 10]}
    >>> c.change_support('B', ('C', 5, 6))
    >>> c.supports
    {'A': [0, 10], 'C': [5, 6]}
    """
    if label not in self._supports:
        raise ValueError("No support exists with the given label")

    i = self._support_labels.index(label)
    rem_label = self._support_labels[(i+1)%2]
    x1 = self._supports[rem_label][0]
    y1 = self._supports[rem_label][1]

    x = sympify(new_support[1])
    y = sympify(new_support[2])

    # _loads_position maps each load label to its [x, y] position. Check the
    # x-coordinate of every load against the new horizontal span. (The previous
    # code iterated the label strings and compared label[0] — a *character* —
    # against coordinates, so the check could never work as intended.)
    for pos in self._loads_position.values():
        if pos[0] >= max(x, x1) or pos[0] <= min(x, x1):
            raise ValueError("The change in support will throw an existing load out of range")

    # Rebuild support bookkeeping with the replacement, keeping the
    # left-to-right ordering invariant of _support_labels.
    self._supports.pop(label)
    self._left_support.clear()
    self._right_support.clear()
    self._reaction_loads.clear()
    self._support_labels.remove(label)
    self._supports[new_support[0]] = [x, y]

    if x1 < x:
        self._left_support.append(x1)
        self._left_support.append(y1)
        self._right_support.append(x)
        self._right_support.append(y)
        self._support_labels.append(new_support[0])
    else:
        self._left_support.append(x)
        self._left_support.append(y)
        self._right_support.append(x1)
        self._right_support.append(y1)
        self._support_labels.insert(0, new_support[0])

    # Reaction forces reset to zero for the new support pair.
    for i in self._support_labels:
        self._reaction_loads[Symbol("R_"+ i +"_x")] = 0
        self._reaction_loads[Symbol("R_"+ i +"_y")] = 0
def apply_load(self, order, load):
    """
    This method adds load to the cable.

    Parameters
    ==========

    order : Integer
        The order of the applied load.

            - For point loads, order = -1
            - For distributed load, order = 0

    load : tuple

        * For point loads, load is of the form (label, x, y, magnitude, direction), where:

        label : String or symbol
            The label of the load

        x : Sympifyable
            The x coordinate of the position of the load

        y : Sympifyable
            The y coordinate of the position of the load

        magnitude : Sympifyable
            The magnitude of the load. It must always be positive

        direction : Sympifyable
            The angle, in degrees, that the load vector makes with the horizontal
            in the counter-clockwise direction. It takes the values 0 to 360,
            inclusive.

        * For uniformly distributed load, load is of the form (label, magnitude)

        label : String or symbol
            The label of the load

        magnitude : Sympifyable
            The magnitude of the load. It must always be positive

    Examples
    ========

    For a point load of magnitude 12 units inclined at 30 degrees with the horizontal:

    >>> from sympy.physics.continuum_mechanics.cable import Cable
    >>> c = Cable(('A', 0, 10), ('B', 10, 10))
    >>> c.apply_load(-1, ('Z', 5, 5, 12, 30))
    >>> c.loads
    {'distributed': {}, 'point_load': {'Z': [12, 30]}}
    >>> c.loads_position
    {'Z': [5, 5]}

    For a uniformly distributed load of magnitude 9 units:

    >>> from sympy.physics.continuum_mechanics.cable import Cable
    >>> c = Cable(('A', 0, 10), ('B', 10, 10))
    >>> c.apply_load(0, ('X', 9))
    >>> c.loads
    {'distributed': {'X': 9}, 'point_load': {}}
    """
    # Point load branch: the cable may hold many point loads, but never a
    # mix of point and distributed loads.
    if order == -1:
        if len(self._loads["distributed"]) != 0:
            raise ValueError("Distributed load already exists")

        label = load[0]
        if label in self._loads["point_load"]:
            raise ValueError("Label already exists")

        x = sympify(load[1])
        y = sympify(load[2])

        if x > self._right_support[0] or x < self._left_support[0]:
            raise ValueError("The load should be positioned between the supports")

        magnitude = sympify(load[3])
        direction = sympify(load[4])

        self._loads["point_load"][label] = [magnitude, direction]
        self._loads_position[label] = [x, y]

    # Distributed load branch: at most one, and only if no point loads exist.
    elif order == 0:
        if len(self._loads_position) != 0:
            raise ValueError("Point load(s) already exist")

        label = load[0]
        if label in self._loads["distributed"]:
            raise ValueError("Label already exists")

        magnitude = sympify(load[1])
        self._loads["distributed"][label] = magnitude

    else:
        raise ValueError("Order should be either -1 or 0")
def remove_loads(self, *args):
    """
    This methods removes the specified loads.

    Parameters
    ==========
    This input takes multiple label(s) as input
    label(s): String or symbol
        The label(s) of the loads to be removed.

    Examples
    ========

    >>> from sympy.physics.continuum_mechanics.cable import Cable
    >>> c = Cable(('A', 0, 10), ('B', 10, 10))
    >>> c.apply_load(-1, ('Z', 5, 5, 12, 30))
    >>> c.loads
    {'distributed': {}, 'point_load': {'Z': [12, 30]}}
    >>> c.remove_loads('Z')
    >>> c.loads
    {'distributed': {}, 'point_load': {}}
    """
    for i in args:
        # An empty _loads_position means any existing load is distributed
        # (point loads always record a position).
        if len(self._loads_position) == 0:
            if i not in self._loads['distributed']:
                raise ValueError("Error removing load " + i + ": no such load exists")
            # Fixed typo: the original popped self._loads['disrtibuted'],
            # which raised KeyError instead of removing the load.
            self._loads['distributed'].pop(i)
        else:
            if i not in self._loads['point_load']:
                raise ValueError("Error removing load " + i + ": no such load exists")
            self._loads['point_load'].pop(i)
            self._loads_position.pop(i)
def solve(self, *args):
    """
    This method solves for the reaction forces at the supports, the tension developed in
    the cable, and updates the length of the cable.

    Parameters
    ==========
    This method requires no input when solving for point loads
    For distributed load, the x and y coordinates of the lowest point of the cable are
    required as

    x: Sympifyable
        The x coordinate of the lowest point

    y: Sympifyable
        The y coordinate of the lowest point

    Examples
    ========
    For point loads,

    >>> from sympy.physics.continuum_mechanics.cable import Cable
    >>> c = Cable(("A", 0, 10), ("B", 10, 10))
    >>> c.apply_load(-1, ('Z', 2, 7.26, 3, 270))
    >>> c.apply_load(-1, ('X', 4, 6, 8, 270))
    >>> c.solve()
    >>> c.tension
    {A_Z: 8.91403453669861, X_B: 19*sqrt(13)/10, Z_X: 4.79150773600774}
    >>> c.reaction_loads
    {R_A_x: -5.25547445255474, R_A_y: 7.2, R_B_x: 5.25547445255474, R_B_y: 3.8}
    >>> c.length
    5.7560958484519 + 2*sqrt(13)

    For distributed load,

    >>> from sympy.physics.continuum_mechanics.cable import Cable
    >>> c=Cable(("A", 0, 40),("B", 100, 20))
    >>> c.apply_load(0, ("X", 850))
    >>> c.solve(58.58)
    >>> c.tension
    {'distributed': 36465.0*sqrt(0.00054335718671383*X**2 + 1)}
    >>> c.tension_at(0)
    61717.4130533677
    >>> c.reaction_loads
    {R_A_x: 36465.0, R_A_y: -49793.0, R_B_x: 44399.9537590861, R_B_y: 42868.2071025955}
    """
    # --- Point-load case: the cable is a chain of straight segments between
    # loads; segment tensions follow from moment balance about each support.
    if len(self._loads_position) != 0:
        # Loads ordered left-to-right, bracketed by the two support labels.
        sorted_position = sorted(self._loads_position.items(), key = lambda item : item[1][0])
        sorted_position.append(self._support_labels[1])
        sorted_position.insert(0, self._support_labels[0])

        self._tension.clear()
        moment_sum_from_left_support = 0
        moment_sum_from_right_support = 0
        F_x = 0
        F_y = 0
        self._length = 0
        tension_func = []
        x = symbols('x')

        for i in range(1, len(sorted_position)-1):
            # Accumulate the length of each straight segment.
            if i == 1:
                self._length+=sqrt((self._left_support[0] - self._loads_position[sorted_position[i][0]][0])**2 + (self._left_support[1] - self._loads_position[sorted_position[i][0]][1])**2)
            else:
                self._length+=sqrt((self._loads_position[sorted_position[i-1][0]][0] - self._loads_position[sorted_position[i][0]][0])**2 + (self._loads_position[sorted_position[i-1][0]][1] - self._loads_position[sorted_position[i][0]][1])**2)
            if i == len(sorted_position)-2:
                self._length+=sqrt((self._right_support[0] - self._loads_position[sorted_position[i][0]][0])**2 + (self._right_support[1] - self._loads_position[sorted_position[i][0]][1])**2)

            # Running moment of all loads up to i about the left support
            # (directions are in degrees, hence the pi/180 conversion).
            moment_sum_from_left_support += self._loads['point_load'][sorted_position[i][0]][0] * cos(pi * self._loads['point_load'][sorted_position[i][0]][1] / 180) * abs(self._left_support[1] - self._loads_position[sorted_position[i][0]][1])
            moment_sum_from_left_support += self._loads['point_load'][sorted_position[i][0]][0] * sin(pi * self._loads['point_load'][sorted_position[i][0]][1] / 180) * abs(self._left_support[0] - self._loads_position[sorted_position[i][0]][0])
            F_x += self._loads['point_load'][sorted_position[i][0]][0] * cos(pi * self._loads['point_load'][sorted_position[i][0]][1] / 180)
            F_y += self._loads['point_load'][sorted_position[i][0]][0] * sin(pi * self._loads['point_load'][sorted_position[i][0]][1] / 180)

            # Tension in the segment from load i to the next node (load or
            # right support), via moment balance about the left support.
            label = Symbol(sorted_position[i][0]+"_"+sorted_position[i+1][0])
            y2 = self._loads_position[sorted_position[i][0]][1]
            x2 = self._loads_position[sorted_position[i][0]][0]
            y1 = 0
            x1 = 0
            if i == len(sorted_position)-2:
                x1 = self._right_support[0]
                y1 = self._right_support[1]
            else:
                x1 = self._loads_position[sorted_position[i+1][0]][0]
                y1 = self._loads_position[sorted_position[i+1][0]][1]

            angle_with_horizontal = atan((y1 - y2)/(x1 - x2))
            tension = -(moment_sum_from_left_support)/(abs(self._left_support[1] - self._loads_position[sorted_position[i][0]][1])*cos(angle_with_horizontal) + abs(self._left_support[0] - self._loads_position[sorted_position[i][0]][0])*sin(angle_with_horizontal))
            self._tension[label] = tension
            tension_func.append((tension, x<=x1))

            moment_sum_from_right_support += self._loads['point_load'][sorted_position[i][0]][0] * cos(pi * self._loads['point_load'][sorted_position[i][0]][1] / 180) * abs(self._right_support[1] - self._loads_position[sorted_position[i][0]][1])
            moment_sum_from_right_support += self._loads['point_load'][sorted_position[i][0]][0] * sin(pi * self._loads['point_load'][sorted_position[i][0]][1] / 180) * abs(self._right_support[0] - self._loads_position[sorted_position[i][0]][0])

        # First segment (left support to first load) uses moments about the
        # right support instead.
        label = Symbol(sorted_position[0][0]+"_"+sorted_position[1][0])
        y2 = self._loads_position[sorted_position[1][0]][1]
        x2 = self._loads_position[sorted_position[1][0]][0]
        x1 = self._left_support[0]
        y1 = self._left_support[1]
        angle_with_horizontal = -atan((y2 - y1)/(x2 - x1))
        tension = -(moment_sum_from_right_support)/(abs(self._right_support[1] - self._loads_position[sorted_position[1][0]][1])*cos(angle_with_horizontal) + abs(self._right_support[0] - self._loads_position[sorted_position[1][0]][0])*sin(angle_with_horizontal))
        self._tension[label] = tension
        tension_func.insert(0,(tension, x<=x2))
        self._tension_func = Piecewise(*tension_func)

        # Reactions: left support from the first segment's tension; right
        # support from overall force equilibrium.
        angle_with_horizontal = pi/2 - angle_with_horizontal
        label = self._support_labels[0]
        self._reaction_loads[Symbol("R_"+label+"_x")] = -sin(angle_with_horizontal) * tension
        F_x += -sin(angle_with_horizontal) * tension
        self._reaction_loads[Symbol("R_"+label+"_y")] = cos(angle_with_horizontal) * tension
        F_y += cos(angle_with_horizontal) * tension
        label = self._support_labels[1]
        self._reaction_loads[Symbol("R_"+label+"_x")] = -F_x
        self._reaction_loads[Symbol("R_"+label+"_y")] = -F_y

    # --- Distributed-load case: the cable is a parabola through both
    # supports with its vertex at the user-supplied lowest point.
    elif len(self._loads['distributed']) != 0 :
        if len(args) == 0:
            raise ValueError("Provide the lowest point of the cable")

        lowest_x = sympify(args[0])
        self._lowest_x_global = lowest_x
        a = Symbol('a', positive=True)
        c = Symbol('c')

        # augmented matrix form of linsolve
        M = Matrix(
            [[(self._left_support[0]-lowest_x)**2, 1, self._left_support[1]],
             [(self._right_support[0]-lowest_x)**2, 1, self._right_support[1]],
            ])
        coefficient_solution = list(linsolve(M, (a, c)))

        if len(coefficient_solution) ==0 or coefficient_solution[0][0]== 0:
            raise ValueError("The lowest point is inconsistent with the supports")

        A = coefficient_solution[0][0]
        C = coefficient_solution[0][1] + coefficient_solution[0][0]*lowest_x**2
        B = -2*coefficient_solution[0][0]*lowest_x
        self._lowest_y_global = coefficient_solution[0][1]
        lowest_y = self._lowest_y_global

        # y = A*x**2 + B*x + C
        # shifting origin to lowest point
        X = Symbol('X')
        Y = Symbol('Y')
        Y = A*(X + lowest_x)**2 + B*(X + lowest_x) + C - lowest_y

        temp_list = list(self._loads['distributed'].values())
        applied_force = temp_list[0]

        # Horizontal tension component is constant along a parabolic cable.
        horizontal_force_constant = (applied_force * (self._right_support[0] - lowest_x)**2) / (2 * (self._right_support[1] - lowest_y))

        self._tension.clear()
        tangent_slope_to_curve = diff(Y, X)
        self._tension['distributed'] = horizontal_force_constant / (cos(atan(tangent_slope_to_curve)))

        label = self._support_labels[0]
        self._reaction_loads[Symbol("R_"+label+"_x")] = self.tension_at(self._left_support[0]) * cos(atan(tangent_slope_to_curve.subs(X, self._left_support[0] - lowest_x)))
        self._reaction_loads[Symbol("R_"+label+"_y")] = self.tension_at(self._left_support[0]) * sin(atan(tangent_slope_to_curve.subs(X, self._left_support[0] - lowest_x)))
        label = self._support_labels[1]
        # NOTE(review): both right-support reactions reuse
        # tension_at(left support x); the tension at the *right* support
        # looks intended here — confirm against the theory/doctest values.
        self._reaction_loads[Symbol("R_"+label+"_x")] = self.tension_at(self._left_support[0]) * cos(atan(tangent_slope_to_curve.subs(X, self._right_support[0] - lowest_x)))
        self._reaction_loads[Symbol("R_"+label+"_y")] = self.tension_at(self._left_support[0]) * sin(atan(tangent_slope_to_curve.subs(X, self._right_support[0] - lowest_x)))
def draw(self):
    """
    This method is used to obtain a plot for the specified cable with its supports,
    shape and loads.

    Examples
    ========

    For point loads,

    >>> from sympy.physics.continuum_mechanics.cable import Cable
    >>> c = Cable(("A", 0, 10), ("B", 10, 10))
    >>> c.apply_load(-1, ('Z', 2, 7.26, 3, 270))
    >>> c.apply_load(-1, ('X', 4, 6, 8, 270))
    >>> c.solve()
    >>> p = c.draw()
    >>> p # doctest: +ELLIPSIS
    Plot object containing:
    [0]: cartesian line: Piecewise((10 - 1.37*x, x <= 2), (8.52 - 0.63*x, x <= 4), (2*x/3 + 10/3, x <= 10)) for x over (0.0, 10.0)
    ...
    >>> p.show()

    For uniformly distributed loads,

    >>> from sympy.physics.continuum_mechanics.cable import Cable
    >>> c=Cable(("A", 0, 40),("B", 100, 20))
    >>> c.apply_load(0, ("X", 850))
    >>> c.solve(58.58)
    >>> p = c.draw()
    >>> p # doctest: +ELLIPSIS
    Plot object containing:
    [0]: cartesian line: 0.0116550116550117*(x - 58.58)**2 + 0.00447086247086247 for x over (0.0, 100.0)
    [1]: cartesian line: -7.49552913752915 for x over (0.0, 100.0)
    ...
    >>> p.show()
    """
    x = Symbol("x")
    annotations = []
    support_rectangles = self._draw_supports()

    # Plot extents are padded by half the overall coordinate span so the
    # support markers and load arrows stay inside the axes.
    xy_min = min(self._left_support[0],self._lowest_y_global)
    xy_max = max(self._right_support[0], max(self._right_support[1],self._left_support[1]))
    max_diff = xy_max - xy_min

    # Cable shape + load annotations depend on the load kind (-1 point, 0 distributed).
    if len(self._loads_position) != 0:
        self._cable_eqn = self._draw_cable(-1)
        annotations += self._draw_loads(-1)
    elif len(self._loads['distributed']) != 0 :
        self._cable_eqn = self._draw_cable(0)
        annotations += self._draw_loads(0)

    if not self._cable_eqn:
        raise ValueError("solve method not called and/or values provided for loads and supports not adequate")

    cab_plot = plot(*self._cable_eqn,(x,self._left_support[0],self._right_support[0]),
                    xlim=(xy_min-0.5*max_diff,xy_max+0.5*max_diff),
                    ylim=(xy_min-0.5*max_diff,xy_max+0.5*max_diff),
                    rectangles=support_rectangles,show= False,annotations=annotations, axis=False)

    return cab_plot
def _draw_supports(self):
    """Return matplotlib rectangle specs marking the two supports.

    Each support is drawn as an unfilled brown square whose side is 7.5%
    of the overall coordinate span; the left square sits to the left of
    its support point, the right square to the right.
    """
    span_lo = min(self._left_support[0], self._lowest_y_global)
    span_hi = max(self._right_support[0], max(self._right_support[1], self._left_support[1]))
    side = 0.075 * (span_hi - span_lo)

    anchors = [
        (self._left_support[0] - side, self._left_support[1]),
        (self._right_support[0], self._right_support[1]),
    ]
    return [
        {
            'xy': anchor,
            'width': side,
            'height': side,
            'color': 'brown',
            'fill': False,
        }
        for anchor in anchors
    ]
def _draw_cable(self,order):
    """Build the plottable cable shape: piecewise lines for point loads
    (order == -1) or a parabola plus a reference line for a distributed
    load (order == 0)."""
    xy_min = min(self._left_support[0],self._lowest_y_global)
    xy_max = max(self._right_support[0], max(self._right_support[1],self._left_support[1]))
    max_diff = xy_max - xy_min

    if order == -1 :
        x,y = symbols('x y')
        line_func = []
        # Straight segments joining left support -> loads (left to right)
        # -> right support, each valid up to its right endpoint's x.
        sorted_position = sorted(self._loads_position.items(), key = lambda item : item[1][0])
        for i in range(len(sorted_position)):
            if(i==0):
                y = ((sorted_position[i][1][1] - self._left_support[1])*(x-self._left_support[0]))/(sorted_position[i][1][0]- self._left_support[0]) + self._left_support[1]
            else:
                y = ((sorted_position[i][1][1] - sorted_position[i-1][1][1] )*(x-sorted_position[i-1][1][0]))/(sorted_position[i][1][0]- sorted_position[i-1][1][0]) + sorted_position[i-1][1][1]
            line_func.append((y,x<=sorted_position[i][1][0]))
        y = ((sorted_position[len(sorted_position)-1][1][1] - self._right_support[1])*(x-self._right_support[0]))/(sorted_position[i][1][0]- self._right_support[0]) + self._right_support[1]
        line_func.append((y,x<=self._right_support[0]))
        return [Piecewise(*line_func)]

    elif order == 0:
        # Fit y = a*(x - x0)**2 + c through both supports, with the vertex
        # at the known lowest x. The second returned expression is a
        # horizontal baseline used as anchor for the load arrows.
        x0 = self._lowest_x_global
        diff_force_height = max_diff*0.075
        a,c,x,y = symbols('a c x y')
        parabola_eqn = a*(x-x0)**2 + c - y
        points = [(self._left_support[0],self._left_support[1]),(self._right_support[0],self._right_support[1])]
        equations = []
        for px, py in points:
            equations.append(parabola_eqn.subs({x: px, y: py}))
        solution = solve(equations, (a, c))
        parabola_eqn = solution[a]*(x-x0)**2 + solution[c]
        return [parabola_eqn, self._lowest_y_global - diff_force_height]
def _draw_loads(self,order):
    """Build matplotlib annotation dicts (arrows + magnitude labels) for the
    applied loads; order selects point (-1) or distributed (0) rendering."""
    xy_min = min(self._left_support[0],self._lowest_y_global)
    xy_max = max(self._right_support[0], max(self._right_support[1],self._left_support[1]))
    max_diff = xy_max - xy_min

    if(order==-1):
        # One arrow per point load, pointing along the load's direction,
        # plus a text label placed a little past the arrow tip.
        arrow_length = max_diff*0.1
        force_arrows = []
        for key in self._loads['point_load']:
            force_arrows.append(
                {
                    'text': '',
                    'xy':(self._loads_position[key][0]+arrow_length*cos(rad(self._loads['point_load'][key][1])),\
                          self._loads_position[key][1] + arrow_length*sin(rad(self._loads['point_load'][key][1]))),
                    'xytext': (self._loads_position[key][0],self._loads_position[key][1]),
                    'arrowprops': {'width': 1, 'headlength':3, 'headwidth':3 , 'facecolor': 'black', }
                }
            )
            mag = self._loads['point_load'][key][0]
            force_arrows.append(
                {
                    'text':f'{mag}N',
                    'xy': (self._loads_position[key][0]+arrow_length*1.6*cos(rad(self._loads['point_load'][key][1])),\
                           self._loads_position[key][1] + arrow_length*1.6*sin(rad(self._loads['point_load'][key][1]))),
                }
            )
        return force_arrows

    elif (order == 0):
        # Nine evenly spaced downward arrows from the cable curve to the
        # baseline, plus one total-magnitude label centered below.
        x = symbols('x')
        force_arrows = []
        x_val = [self._left_support[0] + ((self._right_support[0]-self._left_support[0])/10)*i for i in range(1,10)]
        for i in x_val:
            force_arrows.append(
                {
                    'text':'',
                    'xytext':(
                        i,
                        self._cable_eqn[0].subs(x,i)
                    ),
                    'xy':(
                        i,
                        self._cable_eqn[1].subs(x,i)
                    ),
                    'arrowprops':{'width':1, 'headlength':3.5, 'headwidth':3.5, 'facecolor':'black'}
                }
            )
        mag = 0
        for key in self._loads['distributed']:
            mag += self._loads['distributed'][key]
        force_arrows.append(
            {
                'text':f'{mag} N/m',
                'xy':((self._left_support[0]+self._right_support[0])/2,self._lowest_y_global - max_diff*0.15)
            }
        )
        return force_arrows
def plot_tension(self):
    """
    Returns the diagram/plot of the tension generated in the cable at various points.

    Examples
    ========
    For point loads,

    >>> from sympy.physics.continuum_mechanics.cable import Cable
    >>> c = Cable(("A", 0, 10), ("B", 10, 10))
    >>> c.apply_load(-1, ('Z', 2, 7.26, 3, 270))
    >>> c.apply_load(-1, ('X', 4, 6, 8, 270))
    >>> c.solve()
    >>> p = c.plot_tension()
    >>> p
    Plot object containing:
    [0]: cartesian line: Piecewise((8.91403453669861, x <= 2), (4.79150773600774, x <= 4), (19*sqrt(13)/10, x <= 10)) for x over (0.0, 10.0)

    >>> p.show()

    For uniformly distributed loads,

    >>> from sympy.physics.continuum_mechanics.cable import Cable
    >>> c=Cable(("A", 0, 40),("B", 100, 20))
    >>> c.apply_load(0, ("X", 850))
    >>> c.solve(58.58)
    >>> p = c.plot_tension()
    >>> p
    Plot object containing:
    [0]: cartesian line: 36465.0*sqrt(0.00054335718671383*X**2 + 1) for X over (0.0, 100.0)

    >>> p.show()
    """
    # Point loads produce a piecewise-constant tension in 'x'; a distributed
    # load produces a single smooth expression in 'X'.
    if len(self._loads_position) != 0:
        plot_var = symbols('x')
        tension_expr = self._tension_func
    else:
        plot_var = symbols('X')
        tension_expr = self._tension['distributed']
    return plot(
        tension_expr,
        (plot_var, self._left_support[0], self._right_support[0]),
        show=False,
    )
| Cable |
python | wandb__wandb | tools/perf/scripts/run_load_tests.py | {
"start": 483,
"end": 6726
class ____:
    """Registry of named load-test configurations, keyed by test-case name."""

    def __init__(
        self,
        log_folder: str | None = None,
        num_of_parallel_runs: int = 1,
        data_type: Literal["scalar", "audio", "video", "image", "table"] = "scalar",
    ):
        # Each entry bundles the Arguments passed to run_perf_tests. Names
        # encode the scenario: "scale" cases sweep a dimension; "_1M"/"_100K"
        # suffixes indicate the total step or metric count.
        self.cases = {
            "log_scalar": Arguments(
                loop_count=4,
                step_count=[20_000],
                metric_count=[100],
                root_folder=log_folder,
                num_of_processes=num_of_parallel_runs,
                data_type="scalar",
            ),
            "log_scalar_step_1M": Arguments(
                loop_count=1,
                step_count=[1_000_000],
                metric_count=[100],
                root_folder=log_folder,
                num_of_processes=num_of_parallel_runs,
                data_type="scalar",
            ),
            "log_scale_step": Arguments(
                loop_count=1,
                step_count=[1_000, 2_000, 4_000, 8_000],
                metric_count=[100],
                root_folder=log_folder,
                num_of_processes=num_of_parallel_runs,
                data_type="scalar",
            ),
            "log_scale_step_large": Arguments(
                loop_count=1,
                step_count=[10_000, 20_000, 40_000, 80_000],
                metric_count=[100],
                root_folder=log_folder,
                num_of_processes=num_of_parallel_runs,
                data_type="scalar",
            ),
            "log_scalar_metrics_1M": Arguments(
                loop_count=1,
                step_count=[1],
                metric_count=[1_000_000],
                root_folder=log_folder,
                num_of_processes=num_of_parallel_runs,
                data_type="scalar",
            ),
            "log_scalar_metrics_100K": Arguments(
                loop_count=1,
                step_count=[10],
                metric_count=[100_000],
                root_folder=log_folder,
                num_of_processes=num_of_parallel_runs,
                data_type="scalar",
            ),
            "log_scalar_100Ksteps_100Kmetrics": Arguments(
                loop_count=1,
                step_count=[100_000],
                metric_count=[100_000],
                root_folder=log_folder,
                num_of_processes=num_of_parallel_runs,
                data_type="scalar",
            ),
            "log_scale_metric": Arguments(
                loop_count=1,
                step_count=[1_000],
                metric_count=[1_000, 2_000, 4_000, 8_000],
                root_folder=log_folder,
                num_of_processes=num_of_parallel_runs,
                data_type="scalar",
            ),
            "log_scale_metric_large": Arguments(
                loop_count=1,
                step_count=[10],
                metric_count=[10_000, 20_000, 40_000, 80_000, 160_000],
                root_folder=log_folder,
                num_of_processes=num_of_parallel_runs,
                data_type="scalar",
            ),
            # Only the media cases honor the caller-supplied data_type;
            # all scalar cases pin data_type="scalar" explicitly.
            "log_media": Arguments(
                loop_count=1,
                step_count=[2000],
                metric_count=[10],
                root_folder=log_folder,
                num_of_processes=num_of_parallel_runs,
                data_type=data_type,
            ),
            # this test simulate what MLTraq did on
            # https://github.com/elehcimd/mltraq/blob/devel/notebooks/07%20Tracking%20speed%20-%20Benchmarks%20rev1.ipynb
            # setup: log different # of steps, each step with
            "mltraq_scale_step": Arguments(
                loop_count=1,
                step_count=[10_000, 50_000, 100_000, 500_000],
                metric_count=[1],
                root_folder=log_folder,
                num_of_processes=num_of_parallel_runs,
                data_type=data_type,
            ),
            "mltraq": Arguments(
                loop_count=10,
                step_count=[100],
                metric_count=[1],
                root_folder=log_folder,
                num_of_processes=num_of_parallel_runs,
                data_type=data_type,
            ),
        }

    def run(self, test_case: str):
        """Look up ``test_case`` by name and execute it via run_perf_tests.

        Raises ValueError for unknown names.
        """
        if test_case not in self.cases:
            raise ValueError(f"Test case {test_case} is not found")

        argument = self.cases[test_case]
        run_perf_tests(
            loop_count=argument.loop_count,
            num_steps_options=argument.step_count,
            num_metrics_options=argument.metric_count,
            root_folder=argument.root_folder,
            num_processes=argument.num_of_processes,
            data_type=argument.data_type,
        )
if __name__ == "__main__":
    setup_package_logger()

    # Instantiated once up front only so the CLI can list valid test-case
    # names in --help; the configured instance is built later.
    test_cases_instance = TestCases()

    parser = argparse.ArgumentParser(description="Run load tests.")
    parser.add_argument(
        "-t",
        "--test-case",
        type=str,
        required=True,
        choices=list(test_cases_instance.cases.keys()),
        help="The name of the test case to run",
    )
    parser.add_argument(
        "-d",
        "--data-type",
        type=str,
        default="scalar",
        choices=["scalar", "audio", "video", "image", "table"],
        help="The wandb data type to log. Default is 'scalar'.",
    )
    parser.add_argument(
        "-n",
        "--num-of-parallel-runs",
        type=int,
        default=1,
        help="Number of parallel tests to run. Default is 1.",
    )
    parser.add_argument(
        "-l",
        "--log-folder",
        type=str,
        default=None,
        help="The folder to save the logs. Default is current working directory.",
    )
    args = parser.parse_args()

    # Create root folder for test logs
    log_folder = args.log_folder
    if log_folder is None:
        # Timestamped default folder, e.g. "01312024T235959".
        log_folder = datetime.datetime.now().strftime("%m%d%YT%H%M%S")
    os.makedirs(log_folder, exist_ok=True)

    start_time = time.time()

    # Run the specified test case
    TestCases(
        log_folder=log_folder,
        num_of_parallel_runs=args.num_of_parallel_runs,
        data_type=args.data_type,
    ).run(args.test_case)

    logger.info(f"Test completed in {time.time() - start_time:.2f}s.")
    logger.info(f"Logs saved to {os.getcwd()}/{log_folder}")
| TestCases |
python | google__pytype | pytype/tools/merge_pyi/merge_pyi.py | {
"start": 816,
"end": 2220
class ____(cst.CSTTransformer):
    """Transform away every `Any` and `Never` annotations in function returns and variable assignments.

    For putting 'Any's, it's basically a no-op, and it doesn't help readability
    so better not put anything when pytype gives up.

    Having 'Never' annotated on function returns and variables is valid, but
    they're most likely wrong if it's inferred by pytype, and it has a chain
    effect that all downstream code starts to get treated as unreachable.
    """

    def _is_any_or_never(self, annotation) -> bool:
        """Return True when the annotation *expression* is the bare name Any/Never."""
        return (
            annotation is not None
            and isinstance(annotation, expression.Name)
            and annotation.value in ("Any", "Never")
        )

    def leave_FunctionDef(
        self, original_node: cst.FunctionDef, updated_node: cst.FunctionDef
    ) -> cst.CSTNode:
        # FunctionDef.returns is a cst.Annotation wrapper; its .annotation
        # attribute is the expression we inspect.
        if original_node.returns and self._is_any_or_never(
            original_node.returns.annotation
        ):
            return updated_node.with_changes(returns=None)
        return original_node

    def leave_AnnAssign(
        self, original_node: cst.AnnAssign, updated_node: cst.AnnAssign
    ) -> cst.CSTNode:
        # AnnAssign.annotation is likewise a cst.Annotation wrapper. The old
        # code passed the wrapper itself to _is_any_or_never, whose
        # isinstance(..., Name) check could then never succeed, so Any/Never
        # variable annotations were silently kept. Also require a value:
        # a bare `x: Any` statement cannot become a valid cst.Assign.
        if original_node.value is not None and self._is_any_or_never(
            original_node.annotation.annotation
        ):
            return cst.Assign(
                targets=[cst.AssignTarget(target=updated_node.target)],
                value=updated_node.value,
                semicolon=updated_node.semicolon,
            )
        return original_node
| RemoveAnyNeverTransformer |
python | django__django | tests/custom_lookups/tests.py | {
"start": 5598,
"end": 6109
class ____(models.TextField):
    """TextField that synthesizes lookups/transforms from specially prefixed names.

    Names of the form ``lookupfunc_<name>`` / ``transformfunc_<name>`` are
    turned into SQLFuncFactory instances; everything else defers to the
    default TextField resolution.
    """

    def _dynamic_factory(self, lookup_name, prefix):
        # Returns a factory for prefixed names, None otherwise.
        if not lookup_name.startswith(prefix):
            return None
        key, name = lookup_name.split("_", 1)
        return SQLFuncFactory(key, name)

    def get_lookup(self, lookup_name):
        factory = self._dynamic_factory(lookup_name, "lookupfunc_")
        if factory is not None:
            return factory
        return super().get_lookup(lookup_name)

    def get_transform(self, lookup_name):
        factory = self._dynamic_factory(lookup_name, "transformfunc_")
        if factory is not None:
            return factory
        return super().get_transform(lookup_name)
| CustomField |
python | encode__django-rest-framework | tests/authentication/test_authentication.py | {
"start": 20949,
"end": 21839
class ____(TestCase):
    """Behavior of permission checks on views with authentication disabled."""

    def test_permission_message_with_no_authentication_classes(self):
        """
        An unauthenticated request made against a view that contains no
        `authentication_classes` but do contain `permissions_classes` the error
        code returned should be 403 with the exception's message.
        """
        class DummyPermission(permissions.BasePermission):
            # The custom message must surface verbatim in the 403 body.
            message = 'Dummy permission message'

            def has_permission(self, request, view):
                return False

        request = factory.get('/')
        view = MockView.as_view(
            authentication_classes=(),
            permission_classes=(DummyPermission,),
        )
        response = view(request)
        assert response.status_code == status.HTTP_403_FORBIDDEN
        assert response.data == {'detail': 'Dummy permission message'}
| NoAuthenticationClassesTests |
python | openai__openai-python | src/openai/types/beta/chatkit/chatkit_response_output_text.py | {
"start": 415,
"end": 602
} | class ____(BaseModel):
filename: str
"""Filename referenced by the annotation."""
type: Literal["file"]
"""Type discriminator that is always `file`."""
| AnnotationFileSource |
python | sympy__sympy | sympy/physics/biomechanics/tests/test_musculotendon.py | {
"start": 2434,
"end": 3019
} | class ____:
@staticmethod
def test_is_abstract_base_class():
assert issubclass(MusculotendonBase, abc.ABC)
@staticmethod
def test_class():
assert issubclass(MusculotendonBase, ForceActuator)
assert issubclass(MusculotendonBase, _NamedMixin)
assert MusculotendonBase.__name__ == 'MusculotendonBase'
@staticmethod
def test_cannot_instantiate_directly():
with pytest.raises(TypeError):
_ = MusculotendonBase()
@pytest.mark.parametrize('musculotendon_concrete', [MusculotendonDeGroote2016])
| TestMusculotendonBase |
python | django-haystack__django-haystack | haystack/backends/__init__.py | {
"start": 1499,
"end": 1736
} | class ____:
hits = 0
docs = []
def __len__(self):
return 0
def __getitem__(self, k):
if isinstance(k, slice):
return []
else:
raise IndexError("It's not here.")
| EmptyResults |
python | huggingface__transformers | src/transformers/models/yolos/modeling_yolos.py | {
"start": 8201,
"end": 10850
} | class ____(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config):
super().__init__()
image_size, patch_size = config.image_size, config.patch_size
num_channels, hidden_size = config.num_channels, config.hidden_size
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
batch_size, num_channels, height, width = pixel_values.shape
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
)
embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
return embeddings
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->Yolos
| YolosPatchEmbeddings |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 646787,
"end": 647427
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("TeamDiscussionEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("TeamDiscussion"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| TeamDiscussionConnection |
python | python-openxml__python-docx | tests/oxml/test_xmlchemy.py | {
"start": 20463,
"end": 23781
} | class ____:
def it_adds_a_getter_property_for_the_child_element(self, getter_fixture):
parent, zooChild = getter_fixture
assert parent.zooChild is zooChild
def it_adds_an_add_method_for_the_child_element(self, add_fixture):
parent, expected_xml = add_fixture
zooChild = parent._add_zooChild()
assert parent.xml == expected_xml
assert isinstance(zooChild, CT_ZooChild)
assert parent._add_zooChild.__doc__.startswith("Add a new ``<w:zooChild>`` child element ")
def it_adds_an_insert_method_for_the_child_element(self, insert_fixture):
parent, zooChild, expected_xml = insert_fixture
parent._insert_zooChild(zooChild)
assert parent.xml == expected_xml
assert parent._insert_zooChild.__doc__.startswith("Return the passed ``<w:zooChild>`` ")
def it_adds_a_get_or_add_method_for_the_child_element(self, get_or_add_fixture):
parent, expected_xml = get_or_add_fixture
zooChild = parent.get_or_add_zooChild()
assert isinstance(zooChild, CT_ZooChild)
assert parent.xml == expected_xml
def it_adds_a_remover_method_for_the_child_element(self, remove_fixture):
parent, expected_xml = remove_fixture
parent._remove_zooChild()
assert parent.xml == expected_xml
# fixtures -------------------------------------------------------
@pytest.fixture
def add_fixture(self):
parent = self.parent_bldr(False).element
expected_xml = self.parent_bldr(True).xml()
return parent, expected_xml
@pytest.fixture(params=[True, False])
def getter_fixture(self, request):
zooChild_is_present = request.param
parent = self.parent_bldr(zooChild_is_present).element
zooChild = parent.find(qn("w:zooChild")) # None if not found
return parent, zooChild
@pytest.fixture(params=[True, False])
def get_or_add_fixture(self, request):
zooChild_is_present = request.param
parent = self.parent_bldr(zooChild_is_present).element
expected_xml = self.parent_bldr(True).xml()
return parent, expected_xml
@pytest.fixture
def insert_fixture(self):
parent = (
a_parent()
.with_nsdecls()
.with_child(an_oomChild())
.with_child(an_oooChild())
.with_child(a_zomChild())
).element
zooChild = a_zooChild().with_nsdecls().element
expected_xml = (
a_parent()
.with_nsdecls()
.with_child(an_oomChild())
.with_child(an_oooChild())
.with_child(a_zomChild())
.with_child(a_zooChild())
).xml()
return parent, zooChild, expected_xml
@pytest.fixture(params=[True, False])
def remove_fixture(self, request):
zooChild_is_present = request.param
parent = self.parent_bldr(zooChild_is_present).element
expected_xml = self.parent_bldr(False).xml()
return parent, expected_xml
# fixture components ---------------------------------------------
def parent_bldr(self, zooChild_is_present):
parent_bldr = a_parent().with_nsdecls()
if zooChild_is_present:
parent_bldr.with_child(a_zooChild())
return parent_bldr
| DescribeZeroOrOne |
python | huggingface__transformers | src/transformers/models/seed_oss/configuration_seed_oss.py | {
"start": 822,
"end": 9421
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`SeedOssModel`]. It is used to instantiate an SeedOss
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the SeedOss-36B.
e.g. [ByteDance-Seed/SeedOss-36B](https://huggingface.co/ByteDance-Seed/SeedOss-36B)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 155136):
Vocabulary size of the SeedOss model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`SeedOssModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 27648):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 64):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 80):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 8):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 524288):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 0):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
pretraining_tp (`int`, *optional*, defaults to 1):
Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, *optional*, defaults to `True`):
Whether to use a bias in the query, key, value layers during self-attention.
attention_out_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the output projection layer during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
residual_dropout (`float`, *optional*, defaults to 0.1):
Residual connection dropout value.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
head_dim (`int`, *optional*, defaults to 128):
The attention head dimension.
```python
>>> from transformers import SeedOssModel, SeedOssConfig
>>> # Initializing a SeedOss-36b style configuration
>>> configuration = SeedOssConfig()
>>> # Initializing a model from the SeedOss-36b style configuration
>>> model = SeedOssModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "seed_oss"
keys_to_ignore_at_inference = ["past_key_values"]
# Default tensor parallel plan for base model `SeedOssModel`
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 155136,
hidden_size: Optional[int] = 4096,
intermediate_size: Optional[int] = 27648,
num_hidden_layers: Optional[int] = 64,
num_attention_heads: Optional[int] = 80,
num_key_value_heads: Optional[int] = 8,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 524288,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[float] = 1e-6,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = 1,
bos_token_id: Optional[int] = 0,
eos_token_id: Optional[int] = 2,
pretraining_tp: Optional[int] = 1,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
attention_bias: Optional[bool] = True,
attention_out_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.1,
residual_dropout: Optional[float] = 0.1,
mlp_bias: Optional[bool] = False,
head_dim: Optional[int] = 128,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.pretraining_tp = pretraining_tp
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_out_bias = attention_out_bias
self.attention_dropout = attention_dropout
self.residual_dropout = residual_dropout
self.mlp_bias = mlp_bias
self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["SeedOssConfig"]
| SeedOssConfig |
python | ansible__ansible | hacking/azp/incidental.py | {
"start": 16777,
"end": 16861
} | class ____(Exception):
pass
if __name__ == '__main__':
main()
| ApplicationError |
python | numba__numba | numba/cuda/cudadrv/driver.py | {
"start": 77686,
"end": 79851
} | class ____(object):
def __init__(self, context, handle, finalizer=None):
self.context = context
self.handle = handle
if finalizer is not None:
weakref.finalize(self, finalizer)
def query(self):
"""
Returns True if all work before the most recent record has completed;
otherwise, returns False.
"""
try:
driver.cuEventQuery(self.handle)
except CudaAPIError as e:
if e.code == enums.CUDA_ERROR_NOT_READY:
return False
else:
raise
else:
return True
def record(self, stream=0):
"""
Set the record point of the event to the current point in the given
stream.
The event will be considered to have occurred when all work that was
queued in the stream at the time of the call to ``record()`` has been
completed.
"""
if USE_NV_BINDING:
hstream = stream.handle if stream else binding.CUstream(0)
else:
hstream = stream.handle if stream else 0
driver.cuEventRecord(self.handle, hstream)
def synchronize(self):
"""
Synchronize the host thread for the completion of the event.
"""
driver.cuEventSynchronize(self.handle)
def wait(self, stream=0):
"""
All future works submitted to stream will wait util the event completes.
"""
if USE_NV_BINDING:
hstream = stream.handle if stream else binding.CUstream(0)
else:
hstream = stream.handle if stream else 0
flags = 0
driver.cuStreamWaitEvent(hstream, self.handle, flags)
def elapsed_time(self, evtend):
return event_elapsed_time(self, evtend)
def event_elapsed_time(evtstart, evtend):
'''
Compute the elapsed time between two events in milliseconds.
'''
if USE_NV_BINDING:
return driver.cuEventElapsedTime(evtstart.handle, evtend.handle)
else:
msec = c_float()
driver.cuEventElapsedTime(byref(msec), evtstart.handle, evtend.handle)
return msec.value
| Event |
python | mlflow__mlflow | mlflow/store/tracking/dbmodels/models.py | {
"start": 42940,
"end": 46915
} | class ____(Base):
"""
DB model for evaluation datasets.
"""
__tablename__ = "evaluation_datasets"
dataset_id = Column(String(36), primary_key=True)
"""
Dataset ID: `String` (limit 36 characters).
*Primary Key* for ``evaluation_datasets`` table.
"""
name = Column(String(255), nullable=False)
"""
Dataset name: `String` (limit 255 characters). *Non null* in table schema.
"""
schema = Column(Text, nullable=True)
"""
Schema information: `Text`.
"""
profile = Column(Text, nullable=True)
"""
Profile information: `Text`.
"""
digest = Column(String(64), nullable=True)
"""
Dataset digest: `String` (limit 64 characters).
"""
created_time = Column(BigInteger, default=get_current_time_millis)
"""
Creation time: `BigInteger`.
"""
last_update_time = Column(BigInteger, default=get_current_time_millis)
"""
Last update time: `BigInteger`.
"""
created_by = Column(String(255), nullable=True)
"""
Creator user ID: `String` (limit 255 characters).
"""
last_updated_by = Column(String(255), nullable=True)
"""
Last updater user ID: `String` (limit 255 characters).
"""
records = relationship(
"SqlEvaluationDatasetRecord", back_populates="dataset", cascade="all, delete-orphan"
)
tags = relationship(
"SqlEvaluationDatasetTag",
cascade="all, delete-orphan",
lazy="selectin",
)
__table_args__ = (
PrimaryKeyConstraint("dataset_id", name="evaluation_datasets_pk"),
Index("index_evaluation_datasets_name", "name"),
Index("index_evaluation_datasets_created_time", "created_time"),
)
def to_mlflow_entity(self):
"""
Convert DB model to corresponding MLflow entity.
Returns:
:py:class:`mlflow.entities.EvaluationDataset`.
"""
records = None
# NB: Using SQLAlchemy's inspect module to determine if the field is loaded
# or not as calling .records on the EvaluationDataset object will trigger
# lazy-loading of the records.
state = inspect(self)
if "records" in state.dict:
records = [record.to_mlflow_entity() for record in self.records]
# Convert tags from relationship to dict
# Since we use lazy="selectin", tags are always loaded
# Return empty dict if no tags exist
tags_dict = {tag.key: tag.value for tag in self.tags}
dataset = EvaluationDataset(
dataset_id=self.dataset_id,
name=self.name,
tags=tags_dict,
schema=self.schema,
profile=self.profile,
digest=self.digest,
created_time=self.created_time,
last_update_time=self.last_update_time,
created_by=self.created_by,
last_updated_by=self.last_updated_by,
# experiment_ids will be loaded lazily when accessed
)
if records is not None:
dataset._records = records
return dataset
@classmethod
def from_mlflow_entity(cls, dataset: EvaluationDataset):
"""
Create SqlEvaluationDataset from EvaluationDataset entity.
Args:
dataset: EvaluationDataset entity
Returns:
SqlEvaluationDataset instance
"""
# Note: tags are not set here - they are handled as
# SqlEvaluationDatasetTag objects
return cls(
dataset_id=dataset.dataset_id,
name=dataset.name,
schema=dataset.schema,
profile=dataset.profile,
digest=dataset.digest,
created_time=dataset.created_time or get_current_time_millis(),
last_update_time=dataset.last_update_time or get_current_time_millis(),
created_by=dataset.created_by,
last_updated_by=dataset.last_updated_by,
)
| SqlEvaluationDataset |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0115_add_addonsconfig_history.py | {
"start": 280,
"end": 5288
} | class ____(migrations.Migration):
safe = Safe.before_deploy()
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("projects", "0114_set_timestamp_fields_as_no_null"),
]
operations = [
migrations.CreateModel(
name="HistoricalAddonsConfig",
fields=[
(
"id",
models.IntegerField(
auto_created=True, blank=True, db_index=True, verbose_name="ID"
),
),
(
"created",
django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True, verbose_name="created"
),
),
(
"modified",
django_extensions.db.fields.ModificationDateTimeField(
auto_now=True, verbose_name="modified"
),
),
(
"extra_history_user_id",
models.IntegerField(blank=True, db_index=True, null=True, verbose_name="ID"),
),
(
"extra_history_user_username",
models.CharField(
db_index=True,
max_length=150,
null=True,
verbose_name="username",
),
),
(
"extra_history_ip",
models.CharField(
blank=True, max_length=250, null=True, verbose_name="IP address"
),
),
(
"extra_history_browser",
models.CharField(
blank=True,
max_length=250,
null=True,
verbose_name="Browser user-agent",
),
),
(
"enabled",
models.BooleanField(
default=True,
help_text="Enable/Disable all the addons on this project",
),
),
("analytics_enabled", models.BooleanField(default=False)),
("doc_diff_enabled", models.BooleanField(default=True)),
("doc_diff_show_additions", models.BooleanField(default=True)),
("doc_diff_show_deletions", models.BooleanField(default=True)),
(
"doc_diff_root_selector",
models.CharField(blank=True, max_length=128, null=True),
),
("external_version_warning_enabled", models.BooleanField(default=True)),
("ethicalads_enabled", models.BooleanField(default=True)),
("flyout_enabled", models.BooleanField(default=True)),
("hotkeys_enabled", models.BooleanField(default=True)),
("search_enabled", models.BooleanField(default=True)),
(
"search_default_filter",
models.CharField(blank=True, max_length=128, null=True),
),
(
"stable_latest_version_warning_enabled",
models.BooleanField(default=True),
),
("history_id", models.AutoField(primary_key=True, serialize=False)),
("history_date", models.DateTimeField()),
("history_change_reason", models.CharField(max_length=100, null=True)),
(
"history_type",
models.CharField(
choices=[("+", "Created"), ("~", "Changed"), ("-", "Deleted")],
max_length=1,
),
),
(
"history_user",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to=settings.AUTH_USER_MODEL,
),
),
(
"project",
models.ForeignKey(
blank=True,
db_constraint=False,
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="+",
to="projects.project",
),
),
],
options={
"verbose_name": "historical addons config",
"ordering": ("-history_date", "-history_id"),
"get_latest_by": "history_date",
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
]
| Migration |
python | pyca__cryptography | tests/hazmat/primitives/test_kbkdf.py | {
"start": 14619,
"end": 27576
} | class ____:
_KEY_MATERIAL = bytes(32)
_KEY_MATERIAL2 = _KEY_MATERIAL.replace(b"\x00", b"\x01", 1)
def test_invalid_key(self, backend):
kdf = KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
4,
CounterLocation.BeforeFixed,
b"label",
b"context",
None,
backend=backend,
)
key = kdf.derive(self._KEY_MATERIAL)
kdf = KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
4,
CounterLocation.BeforeFixed,
b"label",
b"context",
None,
backend=backend,
)
with pytest.raises(InvalidKey):
kdf.verify(self._KEY_MATERIAL2, key)
def test_already_finalized(self, backend):
kdf = KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
4,
CounterLocation.BeforeFixed,
b"label",
b"context",
None,
backend=backend,
)
kdf.derive(self._KEY_MATERIAL)
with pytest.raises(AlreadyFinalized):
kdf.derive(self._KEY_MATERIAL2)
kdf = KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
4,
CounterLocation.BeforeFixed,
b"label",
b"context",
None,
backend=backend,
)
key = kdf.derive(self._KEY_MATERIAL)
with pytest.raises(AlreadyFinalized):
kdf.verify(self._KEY_MATERIAL, key)
kdf = KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
4,
CounterLocation.BeforeFixed,
b"label",
b"context",
None,
backend=backend,
)
kdf.verify(self._KEY_MATERIAL, key)
with pytest.raises(AlreadyFinalized):
kdf.verify(self._KEY_MATERIAL, key)
def test_key_length(self, backend):
error = OverflowError if sys.maxsize <= 2**31 else ValueError
with pytest.raises(error):
KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
85899345920,
4,
4,
CounterLocation.BeforeFixed,
b"label",
b"context",
None,
backend=backend,
)
def test_rlen(self, backend):
with pytest.raises(ValueError):
KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
5,
4,
CounterLocation.BeforeFixed,
b"label",
b"context",
None,
backend=backend,
)
def test_r_type(self, backend):
with pytest.raises(TypeError):
KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
b"r", # type: ignore[arg-type]
4,
CounterLocation.BeforeFixed,
b"label",
b"context",
None,
backend=backend,
)
def test_zero_llen(self, backend):
with pytest.raises(ValueError):
KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
0,
CounterLocation.BeforeFixed,
b"label",
b"context",
None,
backend=backend,
)
def test_l_type(self, backend):
with pytest.raises(TypeError):
KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
b"l", # type: ignore[arg-type]
CounterLocation.BeforeFixed,
b"label",
b"context",
None,
backend=backend,
)
def test_l(self, backend):
with pytest.raises(ValueError):
KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
None,
CounterLocation.BeforeFixed,
b"label",
b"context",
None,
backend=backend,
)
def test_unsupported_mode(self, backend):
with pytest.raises(TypeError):
KBKDFCMAC(
algorithms.AES,
None, # type: ignore[arg-type]
32,
4,
4,
CounterLocation.BeforeFixed,
b"label",
b"context",
None,
backend=backend,
)
def test_unsupported_location(self, backend):
with pytest.raises(TypeError):
KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
4,
None, # type: ignore[arg-type]
b"label",
b"context",
None,
backend=backend,
)
def test_unsupported_parameters(self, backend):
with pytest.raises(ValueError):
KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
4,
CounterLocation.BeforeFixed,
b"label",
b"context",
b"fixed",
backend=backend,
)
def test_missing_break_location(self, backend):
with pytest.raises(
ValueError, match=re.escape("Please specify a break_location")
):
KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
4,
CounterLocation.MiddleFixed,
b"label",
b"context",
None,
backend=backend,
)
with pytest.raises(
ValueError, match=re.escape("Please specify a break_location")
):
KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
4,
CounterLocation.MiddleFixed,
b"label",
b"context",
None,
backend=backend,
break_location=None,
)
def test_keyword_only_break_location(self, backend):
with pytest.raises(
TypeError, match=r"\d+ positional arguments but \d+ were given\Z"
):
KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
4,
CounterLocation.MiddleFixed,
b"label",
b"context",
None,
backend,
0, # type: ignore[misc]
)
def test_invalid_break_location(self, backend):
with pytest.raises(OverflowError):
KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
4,
CounterLocation.MiddleFixed,
b"label",
b"context",
None,
backend=backend,
break_location=-1,
)
with pytest.raises(
ValueError, match=re.escape("break_location offset > len(fixed)")
):
kdf = KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
4,
CounterLocation.MiddleFixed,
b"label",
b"context",
None,
backend=backend,
break_location=18,
)
kdf.derive(b"32 bytes long input key material")
def test_ignored_break_location_before(self, backend):
with pytest.raises(
ValueError,
match=re.escape(
"break_location is ignored when location is not"
" CounterLocation.MiddleFixed"
),
):
KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
4,
CounterLocation.BeforeFixed,
b"label",
b"context",
None,
backend=backend,
break_location=0,
)
def test_ignored_break_location_after(self, backend):
with pytest.raises(
ValueError,
match=re.escape(
"break_location is ignored when location is not"
" CounterLocation.MiddleFixed"
),
):
KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
4,
CounterLocation.AfterFixed,
b"label",
b"context",
None,
backend=backend,
break_location=0,
)
def test_unsupported_algorithm(self, backend):
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
KBKDFCMAC(
DummyCipherAlgorithm,
Mode.CounterMode,
32,
4,
4,
CounterLocation.BeforeFixed,
b"label",
b"context",
None,
backend=backend,
)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
KBKDFCMAC(
algorithms.ChaCha20,
Mode.CounterMode,
32,
4,
4,
CounterLocation.BeforeFixed,
b"label",
b"context",
None,
backend=backend,
)
def test_unicode_error_label(self, backend):
with pytest.raises(TypeError):
KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
4,
CounterLocation.BeforeFixed,
"label", # type: ignore[arg-type]
b"context",
None,
backend=backend,
)
def test_unicode_error_context(self, backend):
with pytest.raises(TypeError):
KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
4,
CounterLocation.BeforeFixed,
b"label",
"context", # type: ignore[arg-type]
None,
backend=backend,
)
def test_unsupported_cipher(self, backend):
kdf = KBKDFCMAC(
DummyBlockCipherAlgorithm,
Mode.CounterMode,
32,
4,
4,
CounterLocation.BeforeFixed,
b"label",
b"context",
None,
backend=backend,
)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
kdf.derive(self._KEY_MATERIAL)
def test_unicode_error_key_material(self, backend):
kdf = KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
4,
CounterLocation.BeforeFixed,
b"label",
b"context",
None,
backend=backend,
)
with pytest.raises(TypeError):
kdf.derive("material") # type: ignore[arg-type]
def test_wrong_key_material_length(self, backend):
kdf = KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
32,
4,
4,
CounterLocation.BeforeFixed,
b"label",
b"context",
None,
backend=backend,
)
with pytest.raises(ValueError):
kdf.derive(b"material")
def test_buffer_protocol(self, backend):
kdf = KBKDFCMAC(
algorithms.AES,
Mode.CounterMode,
10,
4,
4,
CounterLocation.BeforeFixed,
b"label",
b"context",
None,
backend=backend,
)
key = kdf.derive(bytearray(self._KEY_MATERIAL))
assert key == b"\x19\xcd\xbe\x17Lb\x115<\xd0"
| TestKBKDFCMAC |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.