language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_data_bar11.py | {
"start": 345,
"end": 8456
} | class ____(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with conditional formatting."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.select()
worksheet.index = 0
worksheet.conditional_format(
"A1",
{
"type": "data_bar",
"data_bar_2010": True,
"min_type": "formula",
"min_value": "=$B$1",
},
)
worksheet.conditional_format(
"A2:B2",
{
"type": "data_bar",
"bar_color": "#63C384",
"data_bar_2010": True,
"min_type": "formula",
"min_value": "=$B$1",
"max_type": "formula",
"max_value": "=$C$1",
},
)
worksheet.conditional_format(
"A3:C3",
{
"type": "data_bar",
"bar_color": "#FF555A",
"data_bar_2010": True,
"min_type": "percentile",
"max_type": "percentile",
"min_value": 10,
"max_value": 90,
},
)
worksheet._assemble_xml_file()
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006" xmlns:x14ac="http://schemas.microsoft.com/office/spreadsheetml/2009/9/ac" mc:Ignorable="x14ac">
<dimension ref="A1"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15" x14ac:dyDescent="0.25"/>
<sheetData/>
<conditionalFormatting sqref="A1">
<cfRule type="dataBar" priority="1">
<dataBar>
<cfvo type="formula" val="$B$1"/>
<cfvo type="max"/>
<color rgb="FF638EC6"/>
</dataBar>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{B025F937-C7B1-47D3-B67F-A62EFF666E3E}">
<x14:id>{DA7ABA51-AAAA-BBBB-0001-000000000001}</x14:id>
</ext>
</extLst>
</cfRule>
</conditionalFormatting>
<conditionalFormatting sqref="A2:B2">
<cfRule type="dataBar" priority="2">
<dataBar>
<cfvo type="formula" val="$B$1"/>
<cfvo type="formula" val="$C$1"/>
<color rgb="FF63C384"/>
</dataBar>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{B025F937-C7B1-47D3-B67F-A62EFF666E3E}">
<x14:id>{DA7ABA51-AAAA-BBBB-0001-000000000002}</x14:id>
</ext>
</extLst>
</cfRule>
</conditionalFormatting>
<conditionalFormatting sqref="A3:C3">
<cfRule type="dataBar" priority="3">
<dataBar>
<cfvo type="percentile" val="10"/>
<cfvo type="percentile" val="90"/>
<color rgb="FFFF555A"/>
</dataBar>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{B025F937-C7B1-47D3-B67F-A62EFF666E3E}">
<x14:id>{DA7ABA51-AAAA-BBBB-0001-000000000003}</x14:id>
</ext>
</extLst>
</cfRule>
</conditionalFormatting>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
<extLst>
<ext xmlns:x14="http://schemas.microsoft.com/office/spreadsheetml/2009/9/main" uri="{78C0D931-6437-407d-A8EE-F0AAD7539E65}">
<x14:conditionalFormattings>
<x14:conditionalFormatting xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:cfRule type="dataBar" id="{DA7ABA51-AAAA-BBBB-0001-000000000001}">
<x14:dataBar minLength="0" maxLength="100" border="1" negativeBarBorderColorSameAsPositive="0">
<x14:cfvo type="formula">
<xm:f>$B$1</xm:f>
</x14:cfvo>
<x14:cfvo type="autoMax"/>
<x14:borderColor rgb="FF638EC6"/>
<x14:negativeFillColor rgb="FFFF0000"/>
<x14:negativeBorderColor rgb="FFFF0000"/>
<x14:axisColor rgb="FF000000"/>
</x14:dataBar>
</x14:cfRule>
<xm:sqref>A1</xm:sqref>
</x14:conditionalFormatting>
<x14:conditionalFormatting xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:cfRule type="dataBar" id="{DA7ABA51-AAAA-BBBB-0001-000000000002}">
<x14:dataBar minLength="0" maxLength="100" border="1" negativeBarBorderColorSameAsPositive="0">
<x14:cfvo type="formula">
<xm:f>$B$1</xm:f>
</x14:cfvo>
<x14:cfvo type="formula">
<xm:f>$C$1</xm:f>
</x14:cfvo>
<x14:borderColor rgb="FF63C384"/>
<x14:negativeFillColor rgb="FFFF0000"/>
<x14:negativeBorderColor rgb="FFFF0000"/>
<x14:axisColor rgb="FF000000"/>
</x14:dataBar>
</x14:cfRule>
<xm:sqref>A2:B2</xm:sqref>
</x14:conditionalFormatting>
<x14:conditionalFormatting xmlns:xm="http://schemas.microsoft.com/office/excel/2006/main">
<x14:cfRule type="dataBar" id="{DA7ABA51-AAAA-BBBB-0001-000000000003}">
<x14:dataBar minLength="0" maxLength="100" border="1" negativeBarBorderColorSameAsPositive="0">
<x14:cfvo type="percentile">
<xm:f>10</xm:f>
</x14:cfvo>
<x14:cfvo type="percentile">
<xm:f>90</xm:f>
</x14:cfvo>
<x14:borderColor rgb="FFFF555A"/>
<x14:negativeFillColor rgb="FFFF0000"/>
<x14:negativeBorderColor rgb="FFFF0000"/>
<x14:axisColor rgb="FF000000"/>
</x14:dataBar>
</x14:cfRule>
<xm:sqref>A3:C3</xm:sqref>
</x14:conditionalFormatting>
</x14:conditionalFormattings>
</ext>
</extLst>
</worksheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleWorksheet |
python | getsentry__sentry | src/sentry/snuba/rpc_dataset_common.py | {
"start": 2492,
"end": 2941
} | class ____:
query_string: str
selected_columns: list[str]
orderby: list[str] | None
offset: int
limit: int
referrer: str
sampling_mode: SAMPLING_MODES | None
resolver: SearchResolver
equations: list[str] | None = None
name: str | None = None
page_token: PageToken | None = None
additional_queries: AdditionalQueries | None = None
extra_conditions: TraceItemFilter | None = None
@dataclass
| TableQuery |
python | fluentpython__example-code | 20-descriptor/bulkfood/bulkfood_v3.py | {
"start": 770,
"end": 1080
} | class ____: # <1>
def __init__(self, storage_name):
self.storage_name = storage_name # <2>
def __set__(self, instance, value): # <3>
if value > 0:
instance.__dict__[self.storage_name] = value # <4>
else:
raise ValueError('value must be > 0')
| Quantity |
python | mlflow__mlflow | mlflow/entities/run_info.py | {
"start": 603,
"end": 757
} | class ____(property):
# Wrapper class over property to designate some of the properties as searchable
# run attributes
pass
| searchable_attribute |
python | numba__numba | numba/core/compiler.py | {
"start": 11742,
"end": 16174
} | class ____(object):
"""
Stores and manages states for the compiler
"""
def __init__(self, typingctx, targetctx, library, args, return_type, flags,
locals):
# Make sure the environment is reloaded
config.reload_config()
typingctx.refresh()
targetctx.refresh()
self.state = StateDict()
self.state.typingctx = typingctx
self.state.targetctx = _make_subtarget(targetctx, flags)
self.state.library = library
self.state.args = args
self.state.return_type = return_type
self.state.flags = flags
self.state.locals = locals
# Results of various steps of the compilation pipeline
self.state.bc = None
self.state.func_id = None
self.state.func_ir = None
self.state.lifted = None
self.state.lifted_from = None
self.state.typemap = None
self.state.calltypes = None
self.state.type_annotation = None
# holds arbitrary inter-pipeline stage meta data
self.state.metadata = {}
self.state.reload_init = []
# hold this for e.g. with_lifting, null out on exit
self.state.pipeline = self
# parfor diagnostics info, add to metadata
self.state.parfor_diagnostics = ParforDiagnostics()
self.state.metadata['parfor_diagnostics'] = \
self.state.parfor_diagnostics
self.state.metadata['parfors'] = {}
self.state.status = _CompileStatus(
can_fallback=self.state.flags.enable_pyobject
)
def compile_extra(self, func):
self.state.func_id = bytecode.FunctionIdentity.from_function(func)
ExtractByteCode().run_pass(self.state)
self.state.lifted = ()
self.state.lifted_from = None
return self._compile_bytecode()
def compile_ir(self, func_ir, lifted=(), lifted_from=None):
self.state.func_id = func_ir.func_id
self.state.lifted = lifted
self.state.lifted_from = lifted_from
self.state.func_ir = func_ir
self.state.nargs = self.state.func_ir.arg_count
FixupArgs().run_pass(self.state)
return self._compile_ir()
def define_pipelines(self):
"""Child classes override this to customize the pipelines in use.
"""
raise NotImplementedError()
def _compile_core(self):
"""
Populate and run compiler pipeline
"""
with ConfigStack().enter(self.state.flags.copy()):
pms = self.define_pipelines()
for pm in pms:
pipeline_name = pm.pipeline_name
func_name = "%s.%s" % (self.state.func_id.modname,
self.state.func_id.func_qualname)
event("Pipeline: %s for %s" % (pipeline_name, func_name))
self.state.metadata['pipeline_times'] = {pipeline_name:
pm.exec_times}
is_final_pipeline = pm == pms[-1]
res = None
try:
pm.run(self.state)
if self.state.cr is not None:
break
except _EarlyPipelineCompletion as e:
res = e.result
break
except Exception as e:
if not isinstance(e, errors.NumbaError):
raise e
self.state.status.fail_reason = e
if is_final_pipeline:
raise e
else:
raise CompilerError("All available pipelines exhausted")
# Pipeline is done, remove self reference to release refs to user
# code
self.state.pipeline = None
# organise a return
if res is not None:
# Early pipeline completion
return res
else:
assert self.state.cr is not None
return self.state.cr
def _compile_bytecode(self):
"""
Populate and run pipeline for bytecode input
"""
assert self.state.func_ir is None
return self._compile_core()
def _compile_ir(self):
"""
Populate and run pipeline for IR input
"""
assert self.state.func_ir is not None
return self._compile_core()
| CompilerBase |
python | ray-project__ray | rllib/connectors/action/immutable.py | {
"start": 393,
"end": 1240
} | class ____(ActionConnector):
def transform(self, ac_data: ActionConnectorDataType) -> ActionConnectorDataType:
assert isinstance(
ac_data.output, tuple
), "Action connector requires PolicyOutputType data."
actions, states, fetches = ac_data.output
tree.traverse(make_action_immutable, actions, top_down=False)
return ActionConnectorDataType(
ac_data.env_id,
ac_data.agent_id,
ac_data.input_dict,
(actions, states, fetches),
)
def to_state(self):
return ImmutableActionsConnector.__name__, None
@staticmethod
def from_state(ctx: ConnectorContext, params: Any):
return ImmutableActionsConnector(ctx)
register_connector(ImmutableActionsConnector.__name__, ImmutableActionsConnector)
| ImmutableActionsConnector |
python | scikit-learn__scikit-learn | sklearn/preprocessing/_label.py | {
"start": 5085,
"end": 25772
} | class ____(TransformerMixin, BaseEstimator, auto_wrap_output_keys=None):
"""Binarize labels in a one-vs-all fashion.
Several regression and binary classification algorithms are
available in scikit-learn. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). `LabelBinarizer` makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. `LabelBinarizer` makes this easy
with the :meth:`inverse_transform` method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int, default=0
Value with which negative labels must be encoded.
pos_label : int, default=1
Value with which positive labels must be encoded.
sparse_output : bool, default=False
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : ndarray of shape (n_classes,)
Holds the label for each class.
y_type_ : str
Represents the type of the target data as evaluated by
:func:`~sklearn.utils.multiclass.type_of_target`. Possible type are
'continuous', 'continuous-multioutput', 'binary', 'multiclass',
'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
sparse_input_ : bool
`True` if the input data to transform is given as a sparse matrix,
`False` otherwise.
See Also
--------
label_binarize : Function to perform the transform operation of
LabelBinarizer with fixed classes.
OneHotEncoder : Encode categorical features using a one-hot aka one-of-K
scheme.
Examples
--------
>>> from sklearn.preprocessing import LabelBinarizer
>>> lb = LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer()
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer()
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
"""
_parameter_constraints: dict = {
"neg_label": [Integral],
"pos_label": [Integral],
"sparse_output": ["boolean"],
}
def __init__(self, *, neg_label=0, pos_label=1, sparse_output=False):
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, y):
"""Fit label binarizer.
Parameters
----------
y : ndarray of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : object
Returns the instance itself.
"""
if self.neg_label >= self.pos_label:
raise ValueError(
f"neg_label={self.neg_label} must be strictly less than "
f"pos_label={self.pos_label}."
)
if self.sparse_output and (self.pos_label == 0 or self.neg_label != 0):
raise ValueError(
"Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
f"pos_label={self.pos_label} and neg_label={self.neg_label}"
)
xp, is_array_api = get_namespace(y)
if is_array_api and self.sparse_output and not _is_numpy_namespace(xp):
raise ValueError(
"`sparse_output=True` is not supported for array API "
f"namespace {xp.__name__}. "
"Use `sparse_output=False` to return a dense array instead."
)
self.y_type_ = type_of_target(y, input_name="y")
if "multioutput" in self.y_type_:
raise ValueError(
"Multioutput target data is not supported with label binarization"
)
if _num_samples(y) == 0:
raise ValueError("y has 0 samples: %r" % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def fit_transform(self, y):
"""Fit label binarizer/transform multi-class labels to binary labels.
The output of transform is sometimes referred to as
the 1-of-K coding scheme.
Parameters
----------
y : {ndarray, sparse matrix} of shape (n_samples,) or \
(n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
Shape will be (n_samples, 1) for binary problems. Sparse matrix
will be of CSR format.
"""
return self.fit(y).transform(y)
def transform(self, y):
"""Transform multi-class labels to binary labels.
The output of transform is sometimes referred to by some authors as
the 1-of-K coding scheme.
Parameters
----------
y : {array, sparse matrix} of shape (n_samples,) or \
(n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification. Sparse matrix can be
CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
Shape will be (n_samples, 1) for binary problems. Sparse matrix
will be of CSR format.
"""
check_is_fitted(self)
xp, is_array_api = get_namespace(y)
if is_array_api and self.sparse_output and not _is_numpy_namespace(xp):
raise ValueError(
"`sparse_output=True` is not supported for array API "
f"namespace {xp.__name__}. "
"Use `sparse_output=False` to return a dense array instead."
)
y_is_multilabel = type_of_target(y).startswith("multilabel")
if y_is_multilabel and not self.y_type_.startswith("multilabel"):
raise ValueError("The object was not fitted with multilabel input.")
return label_binarize(
y,
classes=self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output,
)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels.
Parameters
----------
Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float, default=None
Threshold used in the binary and multi-label cases.
Use 0 when ``Y`` contains the output of :term:`decision_function`
(classifier).
Use 0.5 when ``Y`` contains the output of :term:`predict_proba`.
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y_original : {ndarray, sparse matrix} of shape (n_samples,)
Target values. Sparse matrix will be of CSR format.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), :meth:`inverse_transform` chooses the class with the
greatest value. Typically, this allows to use the output of a
linear model's :term:`decision_function` method directly as the input
of :meth:`inverse_transform`.
"""
check_is_fitted(self)
xp, is_array_api = get_namespace(Y)
if is_array_api and self.sparse_input_ and not _is_numpy_namespace(xp):
raise ValueError(
"`LabelBinarizer` was fitted on a sparse matrix, and therefore cannot "
f"inverse transform a {xp.__name__} array back to a sparse matrix."
)
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.0
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_, xp=xp)
else:
y_inv = _inverse_binarize_thresholding(
Y, self.y_type_, self.classes_, threshold, xp=xp
)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.two_d_array = False
tags.target_tags.one_d_labels = True
return tags
@validate_params(
{
"y": ["array-like", "sparse matrix"],
"classes": ["array-like"],
"neg_label": [Interval(Integral, None, None, closed="neither")],
"pos_label": [Interval(Integral, None, None, closed="neither")],
"sparse_output": ["boolean"],
},
prefer_skip_nested_validation=True,
)
def label_binarize(y, *, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion.
Several regression and binary classification algorithms are
available in scikit-learn. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like or sparse matrix
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape (n_classes,)
Uniquely holds the label for each class.
neg_label : int, default=0
Value with which negative labels must be encoded.
pos_label : int, default=1
Value with which positive labels must be encoded.
sparse_output : bool, default=False,
Set to true if output binary array is desired in CSR sparse format.
Returns
-------
Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)
Shape will be (n_samples, 1) for binary problems. Sparse matrix will
be of CSR format.
See Also
--------
LabelBinarizer : Class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(
y, input_name="y", accept_sparse="csr", ensure_2d=False, dtype=None
)
else:
if _num_samples(y) == 0:
raise ValueError("y has 0 samples: %r" % y)
if neg_label >= pos_label:
raise ValueError(
"neg_label={0} must be strictly less than pos_label={1}.".format(
neg_label, pos_label
)
)
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError(
"Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label)
)
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if "multioutput" in y_type:
raise ValueError(
"Multioutput target data is not supported with label binarization"
)
if y_type == "unknown":
raise ValueError("The type of target data is not known")
xp, is_array_api, device_ = get_namespace_and_device(y)
if is_array_api and sparse_output and not _is_numpy_namespace(xp):
raise ValueError(
"`sparse_output=True` is not supported for array API "
f"'namespace {xp.__name__}'. "
"Use `sparse_output=False` to return a dense array instead."
)
try:
classes = xp.asarray(classes, device=device_)
except (ValueError, TypeError) as e:
# `classes` contains an unsupported dtype for this namespace.
# For example, attempting to create torch.tensor(["yes", "no"]) will fail.
raise ValueError(
f"`classes` contains unsupported dtype for array API namespace "
f"'{xp.__name__}'."
) from e
n_samples = y.shape[0] if hasattr(y, "shape") else len(y)
n_classes = classes.shape[0]
if hasattr(y, "dtype") and xp.isdtype(y.dtype, "integral"):
int_dtype_ = y.dtype
else:
int_dtype_ = indexing_dtype(xp)
if y_type == "binary":
if n_classes == 1:
if sparse_output:
return sp.csr_matrix((n_samples, 1), dtype=int)
else:
Y = xp.zeros((n_samples, 1), dtype=int_dtype_)
Y += neg_label
return Y
elif n_classes >= 3:
y_type = "multiclass"
sorted_class = xp.sort(classes)
if y_type == "multilabel-indicator":
y_n_classes = y.shape[1] if hasattr(y, "shape") else len(y[0])
if n_classes != y_n_classes:
raise ValueError(
"classes {0} mismatch with the labels {1} found in the data".format(
classes, unique_labels(y)
)
)
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = _isin(y, classes, xp=xp)
y_seen = y[y_in_classes]
indices = xp.searchsorted(sorted_class, y_seen)
# cast `y_in_classes` to integer dtype for `xp.cumulative_sum`
y_in_classes = xp.astype(y_in_classes, int_dtype_)
indptr = xp.concat(
(
xp.asarray([0], device=device_),
xp.cumulative_sum(y_in_classes, axis=0),
)
)
data = xp.full_like(indices, pos_label)
# Use NumPy to construct the sparse matrix of one-hot labels
Y = sp.csr_matrix(
(
_convert_to_numpy(data, xp=xp),
_convert_to_numpy(indices, xp=xp),
_convert_to_numpy(indptr, xp=xp),
),
shape=(n_samples, n_classes),
)
if not sparse_output:
Y = xp.asarray(Y.toarray(), device=device_)
elif y_type == "multilabel-indicator":
if sparse_output:
Y = sp.csr_matrix(y)
if pos_label != 1:
data = xp.full_like(Y.data, pos_label)
Y.data = data
else:
if sp.issparse(y):
y = y.toarray()
Y = xp.asarray(y, device=device_, copy=True)
if pos_label != 1:
Y[Y != 0] = pos_label
else:
raise ValueError(
"%s target data is not supported with label binarization" % y_type
)
if not sparse_output:
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
Y = xp.astype(Y, int_dtype_, copy=False)
else:
Y.data = Y.data.astype(int, copy=False)
# preserve label ordering
if xp.any(classes != sorted_class):
indices = xp.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y[:, [-1]]
else:
Y = xp.reshape(Y[:, -1], (-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes, xp=None):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
if sp.issparse(y):
classes = np.asarray(classes)
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = min_max_axis(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) & (row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i] : y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
xp, _, device_ = get_namespace_and_device(y, xp=xp)
classes = xp.asarray(classes, device=device_)
indices = xp.argmax(y, axis=1)
indices = xp.clip(indices, 0, classes.shape[0] - 1)
return classes[indices]
def _inverse_binarize_thresholding(y, output_type, classes, threshold, xp=None):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".format(y.shape))
xp, _, device_ = get_namespace_and_device(y, xp=xp)
classes = xp.asarray(classes, device=device_)
if output_type != "binary" and y.shape[1] != classes.shape[0]:
raise ValueError(
"The number of class is not equal to the number of dimension of y."
)
dtype_ = _find_matching_floating_dtype(y, xp=xp)
if hasattr(y, "dtype") and xp.isdtype(y.dtype, "integral"):
int_dtype_ = y.dtype
else:
int_dtype_ = indexing_dtype(xp)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ("csr", "csc"):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=int)
y.eliminate_zeros()
else:
y = xp.asarray(y.toarray() > threshold, dtype=int_dtype_, device=device_)
else:
y = xp.asarray(
xp.asarray(y, dtype=dtype_, device=device_) > threshold,
dtype=int_dtype_,
device=device_,
)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if classes.shape[0] == 1:
return xp.repeat(classes[0], len(y))
else:
return classes[xp.reshape(y, (-1,))]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
| LabelBinarizer |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/c_osx.py | {
"start": 4875,
"end": 5203
} | class ____(Task.Task):
color = 'PINK'
ext_in = ['.bin']
def run(self):
if getattr(self, 'code', None):
txt = self.code
else:
txt = self.inputs[0].read()
context = getattr(self, 'context', {})
txt = txt.format(**context)
self.outputs[0].write(txt)
| macplist |
python | django-debug-toolbar__django-debug-toolbar | tests/panels/test_staticfiles.py | {
"start": 319,
"end": 5074
} | class ____(BaseTestCase):
panel_id = StaticFilesPanel.panel_id
def test_default_case(self):
response = self.panel.process_request(self.request)
self.panel.generate_stats(self.request, response)
content = self.panel.content
self.assertIn(
"django.contrib.staticfiles.finders.AppDirectoriesFinder", content
)
self.assertIn(
"django.contrib.staticfiles.finders.FileSystemFinder (2 files)", content
)
self.assertEqual(self.panel.get_stats()["num_used"], 0)
self.assertNotEqual(self.panel.num_found, 0)
expected_apps = ["django.contrib.admin", "debug_toolbar"]
if settings.USE_GIS:
expected_apps = ["django.contrib.gis"] + expected_apps
self.assertEqual(self.panel.get_staticfiles_apps(), expected_apps)
self.assertEqual(
self.panel.get_staticfiles_dirs(), finders.FileSystemFinder().locations
)
async def test_store_staticfiles_with_async_context(self):
async def get_response(request):
# template contains one static file
return render(request, "staticfiles/async_static.html")
self._get_response = get_response
async_request = AsyncRequestFactory().get("/")
response = await self.panel.process_request(async_request)
self.panel.generate_stats(self.request, response)
self.assertEqual(self.panel.get_stats()["num_used"], 1)
def test_insert_content(self):
"""
Test that the panel only inserts content after generate_stats and
not the process_request.
"""
response = self.panel.process_request(self.request)
# ensure the panel does not have content yet.
self.assertNotIn(
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
self.panel.content,
)
self.panel.generate_stats(self.request, response)
# ensure the panel renders correctly.
content = self.panel.content
self.assertIn(
"django.contrib.staticfiles.finders.AppDirectoriesFinder", content
)
self.assertValidHTML(content)
def test_path(self):
def get_response(request):
return render(
request,
"staticfiles/path.html",
{
"paths": [
Path("additional_static/base.css"),
"additional_static/base.css",
"additional_static/base2.css",
]
},
)
self._get_response = get_response
request = RequestFactory().get("/")
response = self.panel.process_request(request)
self.panel.generate_stats(self.request, response)
self.assertEqual(self.panel.get_stats()["num_used"], 2)
self.assertIn(
'href="/static/additional_static/base.css"', self.panel.content, 1
)
self.assertIn(
'href="/static/additional_static/base2.css"', self.panel.content, 1
)
def test_storage_state_preservation(self):
"""Ensure the URLMixin doesn't affect storage state"""
original_storage = storage.staticfiles_storage
original_attrs = dict(original_storage.__dict__)
# Trigger mixin injection
self.panel.ready()
# Verify all original attributes are preserved
self.assertEqual(original_attrs, dict(original_storage.__dict__))
def test_context_variable_lifecycle(self):
"""Test the request_id context variable lifecycle"""
from debug_toolbar.panels.staticfiles import request_id_context_var
# Should not raise when context not set
url = storage.staticfiles_storage.url("test.css")
self.assertTrue(url.startswith("/static/"))
# Should track when context is set
token = request_id_context_var.set("test-request-id")
try:
url = storage.staticfiles_storage.url("test.css")
self.assertTrue(url.startswith("/static/"))
# Verify file was tracked
self.assertIn("test.css", [f[0] for f in self.panel.used_paths])
finally:
request_id_context_var.reset(token)
def test_multiple_initialization(self):
"""Ensure multiple panel initializations don't stack URLMixin"""
storage_class = storage.staticfiles_storage.__class__
# Initialize panel multiple times
for _ in range(3):
self.panel.ready()
# Verify URLMixin appears exactly once in bases
mixin_count = sum(1 for base in storage_class.__bases__ if base == URLMixin)
self.assertEqual(mixin_count, 1)
| StaticFilesPanelTestCase |
python | PrefectHQ__prefect | tests/_experimental/plugins/test_plugins.py | {
"start": 4924,
"end": 6362
} | class ____:
"""Tests for secret redaction."""
def test_redact_secret_key(self):
"""Test that AWS_SECRET_ACCESS_KEY is redacted."""
result = redact("AWS_SECRET_ACCESS_KEY", "supersecret123")
assert result == "••••••"
def test_redact_token(self):
"""Test that TOKEN is redacted."""
result = redact("GITHUB_TOKEN", "ghp_abcdefghijklmnop")
assert result == "••••••"
def test_redact_password(self):
"""Test that PASSWORD is redacted."""
result = redact("DATABASE_PASSWORD", "mypassword")
assert result == "••••••"
def test_no_redaction_normal_key(self):
"""Test that normal keys are not redacted (but truncated if long)."""
result = redact("AWS_REGION", "us-east-1")
assert result == "us-east-1"
def test_truncate_long_value(self):
"""Test that long values are truncated."""
long_value = "x" * 100
result = redact("SOME_VALUE", long_value)
assert len(result) < len(long_value)
assert result.endswith("…")
def test_summarize_env(self):
"""Test that environment summary redacts secrets."""
env = {
"AWS_SECRET_ACCESS_KEY": "supersecret",
"AWS_REGION": "us-east-1",
}
summary = summarize_env(env)
assert summary["AWS_SECRET_ACCESS_KEY"] == "••••••"
assert summary["AWS_REGION"] == "us-east-1"
| TestRedaction |
python | optuna__optuna | optuna/integration/lightgbm.py | {
"start": 771,
"end": 1119
} | class ____(ModuleType):
"""Module class that implements `optuna.integration.lightgbm` package."""
__all__ = __all__
__file__ = globals()["__file__"]
__path__ = [os.path.dirname(__file__)]
def __getattr__(self, name: str) -> Any:
return lgb.__dict__[name]
sys.modules[__name__] = _LightGBMModule(__name__)
| _LightGBMModule |
python | geekcomputers__Python | Test-Case-Generator/test_case.py | {
"start": 20965,
"end": 22046
} | class ____(Case): # Type 2
def __init__(self, master):
super(Type2, self).__init__(master)
self.forget_home()
self.take_input()
def take_input(self): # Type 2
try:
self.try_forget()
except AttributeError:
pass
self.get_t(0)
self.get_n(1)
self.get_m(2)
self.get_a(3)
self.show_button(4)
def generate(self): # Type 2
self.output.delete("1.0", END)
self.output.insert(END, self.t)
self.output.insert(END, "\n")
for i in range(self.t):
self.n = randint(self.n_min, self.n_max)
self.m = randint(self.m_min, self.m_max)
self.output.insert(END, self.n)
self.output.insert(END, " ")
self.output.insert(END, self.m)
self.output.insert(END, "\n")
self.a = [0] * self.n
for j in range(self.n):
self.a[j] = randint(self.a_min, self.a_max)
self.output.insert(END, self.a)
self.output.insert(END, "\n")
| Type2 |
python | crytic__slither | slither/core/expressions/expression.py | {
"start": 71,
"end": 340
} | class ____(SourceMapping):
def __init__(self) -> None:
super().__init__()
self._is_lvalue = False
@property
def is_lvalue(self) -> bool:
return self._is_lvalue
def set_lvalue(self) -> None:
self._is_lvalue = True
| Expression |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 26940,
"end": 27554
} | class ____(torch.nn.ModuleDict):
# Guard names should be valid python identifier as we use eval() to get
# corresponding guard value. Some guard names come from source(module path)
# where special symbols are valid. But they are not valid python identifier,
# we should identify these pattern and rewrite them with getattr.
def __init__(self) -> None:
super().__init__()
for i in range(2):
self.add_module(f"l@yer-{i + 1:d}", BasicModule())
def forward(self, x):
for layer in self.values():
x = layer(x)
return x
| ModuleGuardNameIsValid |
python | ray-project__ray | python/ray/data/tests/test_dataset_stats.py | {
"start": 507,
"end": 14664
} | class ____:
"""Test suite for feature_aggregators_for_dataset function."""
def test_numerical_columns_detection(self):
"""Test that numerical columns are correctly identified and get appropriate aggregators."""
# Create a dataset with various numerical types
data = [
{"int_col": 1, "float_col": 1.5, "decimal_col": 2.3, "string_col": "a"},
{"int_col": 2, "float_col": 2.5, "decimal_col": 3.3, "string_col": "b"},
{"int_col": 3, "float_col": 3.5, "decimal_col": 4.3, "string_col": "c"},
]
ds = ray.data.from_items(data)
feature_aggs = feature_aggregators_for_dataset(ds)
# Check that numerical columns are identified
assert "int_col" in feature_aggs.numerical_columns
assert "float_col" in feature_aggs.numerical_columns
assert "decimal_col" in feature_aggs.numerical_columns
assert "string_col" not in feature_aggs.numerical_columns
# Check that string columns are identified
assert "string_col" in feature_aggs.str_columns
assert "int_col" not in feature_aggs.str_columns
# Check that no vector columns are identified
assert len(feature_aggs.vector_columns) == 0
# Check that we have the right number of aggregators
# 3 numerical columns * 8 aggregators each + 1 string column * 3 aggregators = 27 total
assert len(feature_aggs.aggregators) == 27
def test_categorical_columns_detection(self):
"""Test that string columns are correctly identified as categorical."""
data = [
{"category": "A", "name": "Alice", "value": 1},
{"category": "B", "name": "Bob", "value": 2},
{"category": "A", "name": "Charlie", "value": 3},
]
ds = ray.data.from_items(data)
feature_aggs = feature_aggregators_for_dataset(ds)
# Check categorical columns
assert "category" in feature_aggs.str_columns
assert "name" in feature_aggs.str_columns
assert "value" not in feature_aggs.str_columns
# Check numerical columns
assert "value" in feature_aggs.numerical_columns
assert "category" not in feature_aggs.numerical_columns
# Check aggregator count: 1 numerical * 8 + 3 categorical * 2 = 14
assert len(feature_aggs.aggregators) == 14
def test_vector_columns_detection(self):
"""Test that list columns are correctly identified as vector columns."""
data = [
{"vector": [1, 2, 3], "scalar": 1, "text": "hello"},
{"vector": [4, 5, 6], "scalar": 2, "text": "world"},
{"vector": [7, 8, 9], "scalar": 3, "text": "test"},
]
ds = ray.data.from_items(data)
feature_aggs = feature_aggregators_for_dataset(ds)
# Check vector columns
assert "vector" in feature_aggs.vector_columns
assert "scalar" not in feature_aggs.vector_columns
assert "text" not in feature_aggs.vector_columns
# Check other column types
assert "scalar" in feature_aggs.numerical_columns
assert "text" in feature_aggs.str_columns
# Check aggregator count: 1 numerical * 8 + 1 categorical * 3 + 1 vector * 2 = 12
assert len(feature_aggs.aggregators) == 13
def test_mixed_column_types(self):
"""Test dataset with all column types mixed together."""
data = [
{
"int_val": 1,
"float_val": 1.5,
"string_val": "a",
"vector_val": [1, 2],
"bool_val": True,
},
{
"int_val": 2,
"float_val": 2.5,
"string_val": "b",
"vector_val": [3, 4],
"bool_val": False,
},
]
ds = ray.data.from_items(data)
feature_aggs = feature_aggregators_for_dataset(ds)
# Check column classification
assert "int_val" in feature_aggs.numerical_columns
assert "float_val" in feature_aggs.numerical_columns
assert "string_val" in feature_aggs.str_columns
assert "vector_val" in feature_aggs.vector_columns
# bool_val should be treated as numerical (integer-like)
assert "bool_val" in feature_aggs.numerical_columns
# Check aggregator count: 3 numerical * 8 + 1 categorical * 3 + 1 vector * 2 = 29
assert len(feature_aggs.aggregators) == 29
def test_column_filtering(self):
"""Test that only specified columns are included when columns parameter is provided."""
data = [
{"col1": 1, "col2": "a", "col3": [1, 2], "col4": 1.5},
{"col1": 2, "col2": "b", "col3": [3, 4], "col4": 2.5},
]
ds = ray.data.from_items(data)
# Test with specific columns
feature_aggs = feature_aggregators_for_dataset(ds, columns=["col1", "col3"])
# Should only include col1 and col3
assert "col1" in feature_aggs.numerical_columns
assert "col2" not in feature_aggs.str_columns
assert "col3" in feature_aggs.vector_columns
assert "col4" not in feature_aggs.numerical_columns
# Check aggregator count: 1 numerical * 8 + 1 vector * 2 = 10
assert len(feature_aggs.aggregators) == 10
def test_empty_dataset_schema(self):
"""Test behavior with empty dataset that has no schema."""
# Create an empty dataset
ds = ray.data.from_items([])
with pytest.raises(ValueError, match="Dataset must have a schema"):
feature_aggregators_for_dataset(ds)
def test_invalid_columns_parameter(self):
"""Test error handling when columns parameter contains non-existent columns."""
data = [{"col1": 1, "col2": "a"}]
ds = ray.data.from_items(data)
with pytest.raises(ValueError, match="Columns .* not found in dataset schema"):
feature_aggregators_for_dataset(ds, columns=["col1", "nonexistent_col"])
@pytest.mark.skipif(
get_pyarrow_version() < parse_version("20.0.0"),
reason="Test requires PyArrow >= 20.0.0",
)
def test_unsupported_column_types(self):
"""Test that unsupported column types are handled gracefully."""
table = pa.table(
{
"supported_int": [1, 2, 3],
"supported_string": ["a", "b", "c"],
"unsupported_timestamp": [pa.scalar(0, type=pa.timestamp("us"))] * 3,
"unsupported_binary": [b"data"] * 3,
}
)
ds = ray.data.from_arrow(table)
feature_aggs = feature_aggregators_for_dataset(ds)
# Only supported types should be included
assert "supported_int" in feature_aggs.numerical_columns
assert "supported_string" in feature_aggs.str_columns
assert "unsupported_timestamp" not in feature_aggs.numerical_columns
assert "unsupported_timestamp" not in feature_aggs.str_columns
assert "unsupported_timestamp" not in feature_aggs.vector_columns
assert "unsupported_binary" not in feature_aggs.numerical_columns
assert "unsupported_binary" not in feature_aggs.str_columns
assert "unsupported_binary" not in feature_aggs.vector_columns
# Check aggregator count: 1 numerical * 8 + 1 categorical * 3 = 11
assert len(feature_aggs.aggregators) == 11
def test_aggregator_types_verification(self):
"""Test that the correct aggregator types are generated for each column type."""
data = [
{"num": 1, "cat": "a", "vec": [1, 2]},
{"num": 2, "cat": "b", "vec": [3, 4]},
]
ds = ray.data.from_items(data)
feature_aggs = feature_aggregators_for_dataset(ds)
# Check that we have the right types of aggregators
agg_names = [agg.name for agg in feature_aggs.aggregators]
# Numerical aggregators should include all 8 types
num_agg_names = [name for name in agg_names if "num" in name]
assert len(num_agg_names) == 8
assert any("count" in name.lower() for name in num_agg_names)
assert any("mean" in name.lower() for name in num_agg_names)
assert any("min" in name.lower() for name in num_agg_names)
assert any("max" in name.lower() for name in num_agg_names)
assert any("std" in name.lower() for name in num_agg_names)
assert any("missing" in name.lower() for name in num_agg_names)
assert any("zero" in name.lower() for name in num_agg_names)
assert any("approx_quantile" in name.lower() for name in num_agg_names)
# Categorical aggregators should include count and missing percentage
cat_agg_names = [name for name in agg_names if "cat" in name]
assert len(cat_agg_names) == 3
assert any("count" in name.lower() for name in cat_agg_names)
assert any("missing" in name.lower() for name in cat_agg_names)
# Vector aggregators should include count and missing percentage
vec_agg_names = [name for name in agg_names if "vec" in name]
assert len(vec_agg_names) == 2
assert any("count" in name.lower() for name in vec_agg_names)
assert any("missing" in name.lower() for name in vec_agg_names)
def test_aggregator_instances_verification(self):
"""Test that the actual aggregator instances are of the correct types."""
data = [{"num": 1, "cat": "a"}]
ds = ray.data.from_items(data)
feature_aggs = feature_aggregators_for_dataset(ds)
# Find aggregators for the numerical column
num_aggs = [agg for agg in feature_aggs.aggregators if "num" in agg.name]
assert len(num_aggs) == 8
# Check that we have the right aggregator types
agg_types = [type(agg) for agg in num_aggs]
assert Count in agg_types
assert Mean in agg_types
assert Min in agg_types
assert Max in agg_types
assert Std in agg_types
assert MissingValuePercentage in agg_types
assert ZeroPercentage in agg_types
assert ApproximateQuantile in agg_types
# Find aggregators for the categorical column
cat_aggs = [agg for agg in feature_aggs.aggregators if "cat" in agg.name]
assert len(cat_aggs) == 3
# Check that we have the right aggregator types for categorical
cat_agg_types = [type(agg) for agg in cat_aggs]
assert Count in cat_agg_types
assert MissingValuePercentage in cat_agg_types
assert ApproximateTopK in cat_agg_types
# Should not have numerical aggregators for categorical columns
assert Mean not in cat_agg_types
assert Min not in cat_agg_types
assert Max not in cat_agg_types
assert Std not in cat_agg_types
assert ZeroPercentage not in cat_agg_types
def test_return_dataclass_structure(self):
"""Test that the function returns the correct FeatureAggregators dataclass."""
data = [{"num": 1, "cat": "a", "vec": [1, 2]}]
ds = ray.data.from_items(data)
result = feature_aggregators_for_dataset(ds)
# Should return a FeatureAggregators dataclass
assert isinstance(result, FeatureAggregators)
# Check that attributes exist and are lists
assert isinstance(result.numerical_columns, list)
assert isinstance(result.str_columns, list)
assert isinstance(result.vector_columns, list)
assert isinstance(result.aggregators, list)
# Check that column names are strings
for col in (
result.numerical_columns + result.str_columns + result.vector_columns
):
assert isinstance(col, str)
# Check that aggregators have required attributes
for agg in result.aggregators:
assert hasattr(agg, "name")
assert hasattr(agg, "get_target_column")
def test_none_columns_parameter(self):
"""Test that None columns parameter includes all columns."""
data = [{"col1": 1, "col2": "a"}]
ds = ray.data.from_items(data)
# Test with None (should be same as not providing columns parameter)
result1 = feature_aggregators_for_dataset(ds, columns=None)
result2 = feature_aggregators_for_dataset(ds)
# Compare the dataclass attributes
assert result1.numerical_columns == result2.numerical_columns
assert result1.str_columns == result2.str_columns
assert result1.vector_columns == result2.vector_columns
assert len(result1.aggregators) == len(result2.aggregators)
def test_empty_columns_list(self):
"""Test behavior with empty columns list."""
data = [{"col1": 1, "col2": "a"}]
ds = ray.data.from_items(data)
feature_aggs = feature_aggregators_for_dataset(ds, columns=[])
# Should have no columns and no aggregators
assert len(feature_aggs.numerical_columns) == 0
assert len(feature_aggs.str_columns) == 0
assert len(feature_aggs.vector_columns) == 0
assert len(feature_aggs.aggregators) == 0
def test_large_dataset_performance(self):
"""Test performance with a larger dataset to ensure it scales reasonably."""
# Create a larger dataset
data = []
for i in range(1000):
data.append(
{
"id": i,
"value": i * 1.5,
"category": f"cat_{i % 10}",
"vector": [i, i + 1, i + 2],
}
)
ds = ray.data.from_items(data)
# Should complete without issues
feature_aggs = feature_aggregators_for_dataset(ds)
# Verify results
assert "id" in feature_aggs.numerical_columns
assert "value" in feature_aggs.numerical_columns
assert "category" in feature_aggs.str_columns
assert "vector" in feature_aggs.vector_columns
# Check aggregator count: 2 numerical * 8 + 1 categorical * 3 + 1 vector * 2 = 21
assert len(feature_aggs.aggregators) == 21
| TestFeatureAggregatorsForDataset |
python | getsentry__sentry | src/sentry/seer/similarity/types.py | {
"start": 1400,
"end": 4367
} | class ____:
stacktrace_distance: float
should_group: bool
parent_group_id: int
parent_hash: str
# Unfortunately, we have to hardcode this separately from the `RawSeerSimilarIssueData` type
# definition because Python has no way to derive it from the type (nor vice-versa)
required_incoming_keys: ClassVar = {
"stacktrace_distance",
"should_group",
"parent_hash",
}
optional_incoming_keys: ClassVar[set[str]] = set()
expected_incoming_keys: ClassVar = {*required_incoming_keys, *optional_incoming_keys}
@classmethod
def from_raw(cls, project_id: int, raw_similar_issue_data: Mapping[str, Any]) -> Self:
"""
Create an instance of `SeerSimilarIssueData` from the raw data that comes back from Seer,
using the parent hash to look up the parent group id. Needs to be run individually on each
similar issue in the Seer response.
Throws an `IncompleteSeerDataError` if given data with any required keys missing, a
`SimilarHashNotFoundError` if the data points to a grouphash which no longer exists, and a
`SimilarHashMissingGroupError` if the the data points to a grouphash not assigned to a
group. The latter two guarantee that if this successfully returns, the parent group id in the
return value points to an existing group.
"""
# Filter out any data we're not expecting, and then make sure what's left isn't missing anything
raw_similar_issue_data = {
k: v for k, v in raw_similar_issue_data.items() if k in cls.expected_incoming_keys
}
missing_keys = cls.required_incoming_keys - raw_similar_issue_data.keys()
if missing_keys:
raise IncompleteSeerDataError(
"Seer similar issues response entry missing "
+ ("keys " if len(missing_keys) > 1 else "key ")
+ ", ".join(map(lambda key: f"'{key}'", sorted(missing_keys)))
)
# Now that we know we have all the right data, use the parent group's hash to look up its id
parent_grouphash = (
GroupHash.objects.filter(
project_id=project_id, hash=raw_similar_issue_data["parent_hash"]
)
.exclude(state=GroupHash.State.LOCKED_IN_MIGRATION)
.first()
)
if not parent_grouphash:
raise SimilarHashNotFoundError("Similar hash suggested by Seer does not exist")
if not parent_grouphash.group_id:
raise SimilarHashMissingGroupError("Similar hash suggested by Seer missing group id")
# TODO: The `Any` casting here isn't great, but Python currently has no way to
# relate typeddict keys to dataclass properties
similar_issue_data: Any = {
**raw_similar_issue_data,
"parent_group_id": parent_grouphash.group_id,
}
return cls(**similar_issue_data)
| SeerSimilarIssueData |
python | apache__airflow | providers/teradata/tests/unit/teradata/hooks/test_teradata.py | {
"start": 1029,
"end": 11838
} | class ____:
def setup_method(self):
self.connection = Connection(
conn_id="teradata_conn_id",
conn_type="teradata",
login="login",
password="password",
host="host",
schema="schema",
)
self.db_hook = TeradataHook(teradata_conn_id="teradata_conn_id", database="test_db")
self.db_hook.get_connection = mock.Mock()
self.db_hook.get_connection.return_value = self.connection
self.cur = mock.MagicMock(rowcount=0)
self.conn = mock.MagicMock()
self.conn.login = "mock_login"
self.conn.password = "mock_password"
self.conn.host = "mock_host"
self.conn.schema = "mock_schema"
self.conn.port = 1025
self.conn.cursor.return_value = self.cur
self.conn.extra_dejson = {}
conn = self.conn
class UnitTestTeradataHook(TeradataHook):
def get_conn(self):
return conn
@classmethod
def get_connection(cls, conn_id: str) -> Connection:
return conn
self.test_db_hook = UnitTestTeradataHook(teradata_conn_id="teradata_conn_id")
self.test_db_hook.get_uri = mock.Mock(return_value="sqlite://")
@mock.patch("teradatasql.connect")
def test_get_conn(self, mock_connect):
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert args == ()
assert kwargs["host"] == "host"
assert kwargs["database"] == "schema"
assert kwargs["dbs_port"] == 1025
assert kwargs["user"] == "login"
assert kwargs["password"] == "password"
@mock.patch("teradatasql.connect")
def test_get_tmode_conn(self, mock_connect):
tmode_name = {"tmode": "tera"}
self.connection.extra = json.dumps(tmode_name)
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert args == ()
assert kwargs["host"] == "host"
assert kwargs["database"] == "schema"
assert kwargs["dbs_port"] == 1025
assert kwargs["user"] == "login"
assert kwargs["password"] == "password"
assert kwargs["tmode"] == "tera"
@mock.patch("teradatasql.connect")
def test_get_sslmode_conn(self, mock_connect):
tmode_name = {"sslmode": "require"}
self.connection.extra = json.dumps(tmode_name)
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert args == ()
assert kwargs["host"] == "host"
assert kwargs["database"] == "schema"
assert kwargs["dbs_port"] == 1025
assert kwargs["user"] == "login"
assert kwargs["password"] == "password"
assert kwargs["sslmode"] == "require"
@mock.patch("teradatasql.connect")
def test_get_sslverifyca_conn(self, mock_connect):
extravalues = {"sslmode": "verify-ca", "sslca": "/tmp/cert"}
self.connection.extra = json.dumps(extravalues)
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert args == ()
assert kwargs["host"] == "host"
assert kwargs["database"] == "schema"
assert kwargs["dbs_port"] == 1025
assert kwargs["user"] == "login"
assert kwargs["password"] == "password"
assert kwargs["sslmode"] == "verify-ca"
assert kwargs["sslca"] == "/tmp/cert"
@mock.patch("teradatasql.connect")
def test_get_sslverifyfull_conn(self, mock_connect):
extravalues = {"sslmode": "verify-full", "sslca": "/tmp/cert"}
self.connection.extra = json.dumps(extravalues)
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert args == ()
assert kwargs["host"] == "host"
assert kwargs["database"] == "schema"
assert kwargs["dbs_port"] == 1025
assert kwargs["user"] == "login"
assert kwargs["password"] == "password"
assert kwargs["sslmode"] == "verify-full"
assert kwargs["sslca"] == "/tmp/cert"
@mock.patch("teradatasql.connect")
def test_get_sslcrc_conn(self, mock_connect):
extravalues = {"sslcrc": "sslcrc"}
self.connection.extra = json.dumps(extravalues)
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert args == ()
assert kwargs["host"] == "host"
assert kwargs["database"] == "schema"
assert kwargs["dbs_port"] == 1025
assert kwargs["user"] == "login"
assert kwargs["password"] == "password"
assert kwargs["sslcrc"] == "sslcrc"
@mock.patch("teradatasql.connect")
def test_get_sslprotocol_conn(self, mock_connect):
extravalues = {"sslprotocol": "protocol"}
self.connection.extra = json.dumps(extravalues)
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert args == ()
assert kwargs["host"] == "host"
assert kwargs["database"] == "schema"
assert kwargs["dbs_port"] == 1025
assert kwargs["user"] == "login"
assert kwargs["password"] == "password"
assert kwargs["sslprotocol"] == "protocol"
@mock.patch("teradatasql.connect")
def test_get_sslcipher_conn(self, mock_connect):
extravalues = {"sslcipher": "cipher"}
self.connection.extra = json.dumps(extravalues)
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert args == ()
assert kwargs["host"] == "host"
assert kwargs["database"] == "schema"
assert kwargs["dbs_port"] == 1025
assert kwargs["user"] == "login"
assert kwargs["password"] == "password"
assert kwargs["sslcipher"] == "cipher"
def test_get_uri_without_schema(self):
self.connection.schema = "" # simulate missing schema
self.db_hook.get_connection.return_value = self.connection
uri = self.db_hook.get_uri()
expected_uri = f"teradatasql://{self.connection.login}:***@{self.connection.host}"
assert uri == expected_uri
def test_get_uri(self):
ret_uri = self.db_hook.get_uri()
expected_uri = (
f"teradatasql://{self.connection.login}:***@{self.connection.host}/{self.connection.schema}"
if self.connection.schema
else f"teradatasql://{self.connection.login}:***@{self.connection.host}"
)
assert expected_uri == ret_uri
def test_get_records(self):
sql = "SQL"
self.test_db_hook.get_records(sql)
self.cur.execute.assert_called_once_with(sql)
assert self.conn.commit.called
def test_run_without_parameters(self):
sql = "SQL"
self.test_db_hook.run(sql)
self.cur.execute.assert_called_once_with(sql)
assert self.conn.commit.called
def test_run_with_parameters(self):
sql = "SQL"
param = ("p1", "p2")
self.test_db_hook.run(sql, parameters=param)
self.cur.execute.assert_called_once_with(sql, param)
assert self.conn.commit.called
def test_insert_rows(self):
rows = [
(
"'test_string",
None,
datetime(2023, 8, 15),
1,
3.14,
"str",
)
]
target_fields = [
"basestring",
"none",
"datetime",
"int",
"float",
"str",
]
self.test_db_hook.insert_rows("table", rows, target_fields)
self.cur.executemany.assert_called_once_with(
"INSERT INTO table (basestring, none, datetime, int, float, str) VALUES (?,?,?,?,?,?)",
[("'test_string", None, "2023-08-15T00:00:00", "1", "3.14", "str")],
)
def test_call_proc_dict(self):
parameters = {"a": 1, "b": 2, "c": 3}
class bindvar(int):
def getvalue(self):
return self
self.cur.fetchall.return_value = {k: bindvar(v) for k, v in parameters.items()}
result = self.test_db_hook.callproc("proc", True, parameters)
assert result == parameters
def test_set_query_band(self):
query_band_text = "example_query_band_text"
_handle_user_query_band_text(query_band_text)
self.test_db_hook.set_query_band(query_band_text, self.conn)
self.conn.cursor.assert_called_once()
@mock.patch("teradatasql.connect")
def test_query_band_not_in_conn_config(self, mock_connect):
extravalues = {"query_band": "appname=airflow;org=test;"}
self.connection.extra = json.dumps(extravalues)
self.db_hook.get_conn()
assert mock_connect.call_count == 1
args, kwargs = mock_connect.call_args
assert args == ()
assert kwargs["host"] == "host"
assert kwargs["database"] == "schema"
assert kwargs["dbs_port"] == 1025
assert kwargs["user"] == "login"
assert kwargs["password"] == "password"
assert "query_band" not in kwargs
def test_handle_user_query_band_text_invalid():
query_band_text = _handle_user_query_band_text("invalid_queryband")
assert query_band_text == "invalid_queryband;org=teradata-internal-telem;appname=airflow;"
def test_handle_user_query_band_text_override_appname():
query_band_text = _handle_user_query_band_text("appname=test;")
assert query_band_text == "appname=test_airflow;org=teradata-internal-telem;"
def test_handle_user_query_band_text_append_org():
query_band_text = _handle_user_query_band_text("appname=airflow;")
assert query_band_text == "appname=airflow;org=teradata-internal-telem;"
def test_handle_user_query_band_text_user_org():
query_band_text = _handle_user_query_band_text("appname=airflow;org=test")
assert query_band_text == "appname=airflow;org=test"
def test_handle_user_query_band_text_none():
query_band_text = _handle_user_query_band_text(None)
assert query_band_text == "org=teradata-internal-telem;appname=airflow;"
def test_handle_user_query_band_text_no_appname():
query_band_text = _handle_user_query_band_text("org=test;")
assert query_band_text == "org=test;appname=airflow;"
def test_handle_user_query_band_text_no_appname_with_teradata_org():
query_band_text = _handle_user_query_band_text("org=teradata-internal-telem;")
assert query_band_text == "org=teradata-internal-telem;appname=airflow;"
| TestTeradataHook |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/redshift_cluster.py | {
"start": 16104,
"end": 20777
} | class ____(AwsBaseOperator[RedshiftHook]):
"""
Creates a manual snapshot of the specified cluster. The cluster must be in the available state.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RedshiftCreateClusterSnapshotOperator`
:param snapshot_identifier: A unique identifier for the snapshot that you are requesting
:param cluster_identifier: The cluster identifier for which you want a snapshot
:param retention_period: The number of days that a manual snapshot is retained.
If the value is -1, the manual snapshot is retained indefinitely.
:param tags: A list of tag instances
:param wait_for_completion: Whether wait for the cluster snapshot to be in ``available`` state
:param poll_interval: Time (in seconds) to wait between two consecutive calls to check state
:param max_attempt: The maximum number of attempts to be made to check the state
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param deferrable: If True, the operator will run as a deferrable operator.
"""
template_fields: Sequence[str] = aws_template_fields(
"cluster_identifier",
"snapshot_identifier",
)
aws_hook_class = RedshiftHook
def __init__(
self,
*,
snapshot_identifier: str,
cluster_identifier: str,
retention_period: int = -1,
tags: list[Any] | None = None,
wait_for_completion: bool = False,
poll_interval: int = 15,
max_attempt: int = 20,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.snapshot_identifier = snapshot_identifier
self.cluster_identifier = cluster_identifier
self.retention_period = retention_period
self.tags = tags
self.wait_for_completion = wait_for_completion
self.poll_interval = poll_interval
self.max_attempt = max_attempt
self.deferrable = deferrable
def execute(self, context: Context) -> Any:
cluster_state = self.hook.cluster_status(cluster_identifier=self.cluster_identifier)
if cluster_state != "available":
raise AirflowException(
"Redshift cluster must be in available state. "
f"Redshift cluster current state is {cluster_state}"
)
self.hook.create_cluster_snapshot(
cluster_identifier=self.cluster_identifier,
snapshot_identifier=self.snapshot_identifier,
retention_period=self.retention_period,
tags=self.tags,
)
if self.deferrable:
self.defer(
trigger=RedshiftCreateClusterSnapshotTrigger(
cluster_identifier=self.cluster_identifier,
waiter_delay=self.poll_interval,
waiter_max_attempts=self.max_attempt,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
# timeout is set to ensure that if a trigger dies, the timeout does not restart
# 60 seconds is added to allow the trigger to exit gracefully (i.e. yield TriggerEvent)
timeout=timedelta(seconds=self.max_attempt * self.poll_interval + 60),
)
if self.wait_for_completion:
self.hook.conn.get_waiter("snapshot_available").wait(
ClusterIdentifier=self.cluster_identifier,
WaiterConfig={
"Delay": self.poll_interval,
"MaxAttempts": self.max_attempt,
},
)
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> None:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] != "success":
raise AirflowException(f"Error creating snapshot: {validated_event}")
self.log.info("Cluster snapshot created.")
| RedshiftCreateClusterSnapshotOperator |
python | django-guardian__django-guardian | guardian/testapp/tests/test_shortcuts.py | {
"start": 62774,
"end": 62912
} | class ____(ContentTypeCacheMixin, TransactionTestCase):
"""Test cache against TransactionTestCase"""
| ContentTypeCacheTransactionTestCase |
python | pypa__setuptools | pkg_resources/__init__.py | {
"start": 66498,
"end": 67363
} | class ____(dict[str, "MemoizedZipManifests.manifest_mod"]):
"""
zip manifest builder
"""
# `path` could be `StrPath | IO[bytes]` but that violates the LSP for `MemoizedZipManifests.load`
@classmethod
def build(cls, path: str) -> dict[str, zipfile.ZipInfo]:
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with zipfile.ZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
| ZipManifests |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 184442,
"end": 185210
} | class ____(Operation):
def call(self, x):
return backend.numpy.sin(x)
def compute_output_spec(self, x):
dtype = backend.standardize_dtype(getattr(x, "dtype", backend.floatx()))
if dtype == "int64":
dtype = backend.floatx()
else:
dtype = dtypes.result_type(dtype, float)
sparse = getattr(x, "sparse", False)
return KerasTensor(x.shape, dtype=dtype, sparse=sparse)
@keras_export(["keras.ops.sin", "keras.ops.numpy.sin"])
def sin(x):
"""Trigonometric sine, element-wise.
Arguments:
x: Input tensor.
Returns:
Output tensor of same shape as `x`.
"""
if any_symbolic_tensors((x,)):
return Sin().symbolic_call(x)
return backend.numpy.sin(x)
| Sin |
python | huggingface__transformers | src/transformers/models/evolla/modular_evolla.py | {
"start": 5762,
"end": 5816
} | class ____(EsmAttention):
pass
| EvollaSaProtAttention |
python | sanic-org__sanic | sanic/logging/color.py | {
"start": 411,
"end": 1586
} | class ____(StrEnum): # no cov
"""
Colors for log messages. If the output is not a TTY, the colors will be
disabled.
Can be used like this:
.. code-block:: python
from sanic.log import logger, Colors
logger.info(f"{Colors.GREEN}This is a green message{Colors.END}")
Attributes:
END: Reset the color
BOLD: Bold text
BLUE: Blue text
GREEN: Green text
PURPLE: Purple text
RED: Red text
SANIC: Sanic pink
YELLOW: Yellow text
GREY: Grey text
"""
END = "\033[0m" if COLORIZE else ""
BOLD = "\033[1m" if COLORIZE else ""
BLUE = "\033[34m" if COLORIZE else ""
GREEN = "\033[32m" if COLORIZE else ""
PURPLE = "\033[35m" if COLORIZE else ""
CYAN = "\033[36m" if COLORIZE else ""
RED = "\033[31m" if COLORIZE else ""
YELLOW = "\033[33m" if COLORIZE else ""
GREY = "\033[38;5;240m" if COLORIZE else ""
SANIC = "\033[38;2;255;13;104m" if COLORIZE else ""
LEVEL_COLORS = {
logging.DEBUG: Colors.BLUE,
logging.WARNING: Colors.YELLOW,
logging.ERROR: Colors.RED,
logging.CRITICAL: Colors.RED + Colors.BOLD,
}
| Colors |
python | pydantic__pydantic | pydantic-core/tests/serializers/test_any.py | {
"start": 29256,
"end": 29369
} | class ____(ipaddress.IPv6Network):
def __str__(self):
return super().__str__() + '_subclassed'
| SubNetV6 |
python | getsentry__sentry | src/sentry/integrations/bitbucket/installed.py | {
"start": 600,
"end": 1241
} | class ____(Endpoint):
owner = ApiOwner.INTEGRATIONS
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
authentication_classes = ()
permission_classes = ()
@csrf_exempt
def dispatch(self, request: HttpRequest, *args, **kwargs) -> HttpResponseBase:
return super().dispatch(request, *args, **kwargs)
def post(self, request: Request, *args, **kwargs) -> Response:
state = request.data
data = BitbucketIntegrationProvider().build_integration(state)
ensure_integration(IntegrationProviderSlug.BITBUCKET.value, data)
return self.respond()
| BitbucketInstalledEndpoint |
python | langchain-ai__langchain | libs/cli/langchain_cli/integration_template/integration_template/retrievers.py | {
"start": 232,
"end": 2915
} | class ____(BaseRetriever):
# TODO: Replace all TODOs in docstring. See example docstring:
# https://github.com/langchain-ai/langchain/blob/master/libs/community/langchain_community/retrievers/tavily_search_api.py#L17
"""__ModuleName__ retriever.
# TODO: Replace with relevant packages, env vars, etc.
Setup:
Install `__package_name__` and set environment variable
`__MODULE_NAME___API_KEY`.
```bash
pip install -U __package_name__
export __MODULE_NAME___API_KEY="your-api-key"
```
# TODO: Populate with relevant params.
Key init args:
arg 1: type
description
arg 2: type
description
# TODO: Replace with relevant init params.
Instantiate:
```python
from __package_name__ import __ModuleName__Retriever
retriever = __ModuleName__Retriever(
# ...
)
```
Usage:
```python
query = "..."
retriever.invoke(query)
```
```txt
# TODO: Example output.
```
Use within a chain:
```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
prompt = ChatPromptTemplate.from_template(
\"\"\"Answer the question based only on the context provided.
Context: {context}
Question: {question}\"\"\"
)
model = ChatOpenAI(model="gpt-3.5-turbo-0125")
def format_docs(docs):
return "\\n\\n".join(doc.page_content for doc in docs)
chain = (
{"context": retriever | format_docs, "question": RunnablePassthrough()}
| prompt
| model
| StrOutputParser()
)
chain.invoke("...")
```
```
# TODO: Example output.
```
"""
k: int = 3
# TODO: This method must be implemented to retrieve documents.
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any
) -> List[Document]:
k = kwargs.get("k", self.k)
return [
Document(page_content=f"Result {i} for query: {query}") for i in range(k)
]
# optional: add custom async implementations here
# async def _aget_relevant_documents(
# self,
# query: str,
# *,
# run_manager: AsyncCallbackManagerForRetrieverRun,
# **kwargs: Any,
# ) -> List[Document]: ...
| __ModuleName__Retriever |
python | huggingface__transformers | src/transformers/models/blip_2/modeling_blip_2.py | {
"start": 5399,
"end": 6211
} | class ____(ModelOutput):
r"""
text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
The text embeddings obtained by applying the projection layer to the pooler_output.
"""
text_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
"""
)
# Copied from transformers.models.clip.modeling_clip.CLIPVisionModelOutput with CLIP->Blip2
| Blip2TextModelOutput |
python | google__jax | jax/_src/core.py | {
"start": 11457,
"end": 13207
} | class ____:
__slots__ = ['context', 'prev_compute_type', 'prev_threefry_partitionable',
'prev_xla_metadata', 'prev_abstract_mesh']
def __init__(self, context):
self.context = context
def __enter__(self):
self.prev_compute_type = config.compute_on_context_manager.swap_local(
self.context.compute_type
)
if (
self.prev_compute_type is not None
and self.prev_compute_type is not config_ext.unset
and self.context.compute_type != self.prev_compute_type
):
config.compute_on_context_manager.set_local(self.prev_compute_type)
raise NotImplementedError(
"Nesting `compute_on` with different compute types is not supported"
f" yet. Current compute_on type: {self.prev_compute_type}"
)
self.prev_threefry_partitionable = config.threefry_partitionable.swap_local(
self.context.threefry_partitionable
)
if self.context.xla_metadata:
self.prev_xla_metadata = config.xla_metadata_context_manager.get_local()
updated = xla_metadata_lib.update_metadata(
self.prev_xla_metadata, self.context.xla_metadata
)
config.xla_metadata_context_manager.set_local(updated)
self.prev_abstract_mesh = config.abstract_mesh_context_manager.swap_local(
self.context.cur_abstract_mesh
)
def __exit__(self, exc_type, exc_value, traceback):
config.compute_on_context_manager.set_local(self.prev_compute_type)
config.threefry_partitionable.set_local(self.prev_threefry_partitionable)
if self.context.xla_metadata:
config.xla_metadata_context_manager.set_local(self.prev_xla_metadata)
config.abstract_mesh_context_manager.set_local(self.prev_abstract_mesh)
| JaxprEqnContextManager |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pep8_naming/N818.py | {
"start": 143,
"end": 175
} | class ____(AnotherError):
pass
| E |
python | django-mptt__django-mptt | tests/myapp/models.py | {
"start": 962,
"end": 1396
} | class ____(models.Model):
name = models.CharField(max_length=100)
category_fk = models.ForeignKey(
"Category",
to_field="category_uuid",
null=True,
related_name="items_by_fk",
on_delete=models.CASCADE,
)
category_pk = models.ForeignKey(
"Category", null=True, related_name="items_by_pk", on_delete=models.CASCADE
)
def __str__(self):
return self.name
| Item |
python | ray-project__ray | python/ray/dag/tests/experimental/test_torch_tensor_transport.py | {
"start": 418,
"end": 3632
} | class ____:
def echo_device(self, tensor: torch.Tensor) -> str:
if isinstance(tensor, RaySystemError):
raise tensor
return str(tensor.device)
def echo_dict_device(
self, dict_of_tensors: Dict[str, torch.Tensor]
) -> Dict[str, str]:
if isinstance(dict_of_tensors, RaySystemError):
raise dict_of_tensors
return {k: str(v.device) for k, v in dict_of_tensors.items()}
def send(self, device: str) -> torch.Tensor:
return torch.ones((100,), device=device)
def send_dict(self, name_device_pairs: Dict[str, str]) -> Dict[str, torch.Tensor]:
tensor_dict = {}
for name, device in name_device_pairs.items():
tensor_dict[name] = torch.ones((100,), device=device)
return tensor_dict
def run_driver_to_worker_dag(actor, device, tensor_input, is_dict=False):
"""Create and execute a DAG with tensor transport for driver to worker tests.
Args:
actor: Ray actor to use
device: Target device ("cpu", "cuda", or "default")
tensor_input: Input tensor(s) to execute with
is_dict: Whether to use dict version of the method
Returns:
ray.ObjectRef: Result reference
"""
with InputNode() as inp:
method = actor.echo_dict_device if is_dict else actor.echo_device
dag = method.bind(inp.with_tensor_transport(device=device))
compiled_dag = dag.experimental_compile()
return compiled_dag.execute(tensor_input)
def run_worker_to_worker_dag(sender, receiver, device, input_device, is_dict=False):
"""Create and execute a DAG with tensor transport for worker to worker tests.
Args:
sender: Sender Ray actor
receiver: Receiver Ray actor
device: Target device for tensor transport
input_device: Device string to pass to sender
is_dict: Whether to use dict version of the methods
Returns:
ray.ObjectRef: Result reference or ValueError for compilation errors
"""
with InputNode() as inp:
if is_dict:
tensor = sender.send_dict.bind(inp)
dag = receiver.echo_dict_device.bind(
tensor.with_tensor_transport(device=device)
)
else:
tensor = sender.send.bind(inp)
dag = receiver.echo_device.bind(tensor.with_tensor_transport(device=device))
compiled_dag = dag.experimental_compile()
return compiled_dag.execute(input_device)
def run_worker_to_driver_dag(actor, device, input_device, is_dict=False):
"""Create and execute a DAG with tensor transport for worker to driver tests.
Args:
actor: Ray actor to use
device: Target device for tensor transport
input_device: Device string to pass to actor
is_dict: Whether to use dict version of the method
Returns:
ray.ObjectRef: Result reference
"""
with InputNode() as inp:
if is_dict:
dag = actor.send_dict.bind(inp).with_tensor_transport(device=device)
else:
dag = actor.send.bind(inp).with_tensor_transport(device=device)
compiled_dag = dag.experimental_compile()
return compiled_dag.execute(input_device)
| Actor |
python | sqlalchemy__sqlalchemy | test/orm/test_immediate_load.py | {
"start": 5698,
"end": 8829
} | class ____(_fixtures.FixtureTest):
run_inserts = None
run_deletes = "each"
@testing.fixture
def node_fixture(self):
Node = self.classes.Node
nodes = self.tables.nodes
def go(join_depth):
self.mapper_registry.map_imperatively(
Node,
nodes,
properties={
"parent": relationship(
Node,
remote_side=nodes.c.id,
lazy="immediate",
join_depth=join_depth,
)
},
)
return Node
yield go
# 1. the fixture uses InnoDB, so foreign keys are enforced
# 2. "delete from nodes" in InnoDB is not smart enough to delete
# all rows in the correct order automatically
# 3. "DELETE..ORDER BY" is mysql specific. we want the TablesTest
# fixture to be generic
# 4. Can't add "ON DELETE CASCADE" to that fixture because SQL Server
# rejects it
# 5. so until we have a "delete from all tables taking FKs into
# account" routine, we need a custom teardown here for MySQL/MariaDB
# 6. A similar fixture in test_recursive_loaders is cheating since it's
# hardcoding to using MyISAM for MySQL
with Session(testing.db) as sess:
for node in sess.scalars(select(Node)):
sess.delete(node)
sess.commit()
@testing.variation("persistence", ["expunge", "keep", "reload"])
@testing.combinations((None,), (1,), (2,), argnames="join_depth")
def test_self_referential_recursive(
self, persistence, join_depth, node_fixture
):
"""test #10139"""
Node = node_fixture(join_depth)
sess = fixture_session()
n0 = Node(data="n0")
n1 = Node(data="n1")
n2 = Node(data="n2")
n1.parent = n0
n2.parent = n1
sess.add_all([n0, n1, n2])
sess.commit()
if persistence.expunge or persistence.reload:
sess.close()
if persistence.reload:
sess.add(n1)
sess.add(n0)
n2 = sess.query(Node).filter(Node.data == "n2").one()
if persistence.expunge and (join_depth is None or join_depth < 1):
expected_count = 1
else:
expected_count = 0
with self.assert_statement_count(testing.db, expected_count):
if persistence.keep or persistence.reload:
is_(n2.parent, n1)
else:
eq_(n2.parent, Node(data="n1"))
n1 = n2.parent
# ensure n1.parent_id is unexpired
n1.parent_id
if persistence.expunge and (join_depth is None or join_depth < 2):
expected_count = 1
else:
expected_count = 0
with self.assert_statement_count(testing.db, expected_count):
if persistence.keep or persistence.reload:
is_(n1.parent, n0)
else:
eq_(n1.parent, Node(data="n0"))
| SelfReferentialTest |
python | pydata__xarray | xarray/tests/test_datatree.py | {
"start": 5843,
"end": 7052
} | class ____:
def test_create_with_data(self) -> None:
dat = xr.Dataset({"a": 0})
john = DataTree(name="john", dataset=dat)
assert_identical(john.to_dataset(), dat)
with pytest.raises(TypeError):
DataTree(name="mary", dataset="junk") # type: ignore[arg-type]
def test_set_data(self) -> None:
john = DataTree(name="john")
dat = xr.Dataset({"a": 0})
john.dataset = dat # type: ignore[assignment,unused-ignore]
assert_identical(john.to_dataset(), dat)
with pytest.raises(TypeError):
john.dataset = "junk" # type: ignore[assignment]
def test_has_data(self) -> None:
john = DataTree(name="john", dataset=xr.Dataset({"a": 0}))
assert john.has_data
john_no_data = DataTree(name="john", dataset=None)
assert not john_no_data.has_data
def test_is_hollow(self) -> None:
john = DataTree(dataset=xr.Dataset({"a": 0}))
assert john.is_hollow
eve = DataTree(children={"john": john})
assert eve.is_hollow
eve.dataset = xr.Dataset({"a": 1}) # type: ignore[assignment,unused-ignore]
assert not eve.is_hollow
| TestStoreDatasets |
python | pytorch__pytorch | torch/fx/experimental/validator.py | {
"start": 26973,
"end": 27877
} | class ____(TorchDynamoException):
def __init__(self, model, assertions, target_exprs, failed_source_exprs):
assert _HAS_Z3
def symbolstr(sym) -> str:
return f"{sym}: {model[sym]}"
def joinlines(xs) -> str:
return "\n".join(f" ==> {x}" for x in xs)
model_str = joinlines(sorted(map(symbolstr, model)))
assertions_str = joinlines(sorted(map(z3str, assertions)))
target_exprs_str = joinlines(sorted(map(z3str, target_exprs)))
failed_source_exprs_str = joinlines(sorted(map(z3str, failed_source_exprs)))
self.msg = "translation validation failed."
self.details = f"""\
Model:
{model_str}
Assertions:
{assertions_str}
Target Expressions:
{target_exprs_str}
Failed Source Expressions:
{failed_source_exprs_str}"""
def __str__(self):
return f"{self.msg}\n\n{self.details}"
| ValidationException |
python | astropy__astropy | astropy/coordinates/tests/test_representation_arithmetic.py | {
"start": 43671,
"end": 45850
} | class ____:
"""Test copied from SphericalDifferential, so less extensive."""
def setup_method(self):
s = CartesianRepresentation(
x=[1, 2, 3] * u.kpc, y=[2, 3, 1] * u.kpc, z=[3, 1, 2] * u.kpc
)
self.s = s
self.e = s.unit_vectors()
self.sf = s.scale_factors()
def test_name(self):
assert CartesianDifferential.name == "cartesian"
assert CartesianDifferential.name in DIFFERENTIAL_CLASSES
def test_simple_differentials(self):
s, e, sf = self.s, self.e, self.sf
for d, differential in ( # test different inits while we're at it.
("x", CartesianDifferential(1.0 * u.pc, 0.0 * u.pc, 0.0 * u.pc)),
("y", CartesianDifferential([0.0, 1.0, 0.0], unit=u.pc)),
(
"z",
CartesianDifferential(np.array([[0.0, 0.0, 1.0]]) * u.pc, xyz_axis=1),
),
):
o_c = differential.to_cartesian(base=s)
o_c2 = differential.to_cartesian()
assert np.all(representation_equal(o_c, o_c2))
assert all(
np.all(getattr(differential, "d_" + c) == getattr(o_c, c))
for c in ("x", "y", "z")
)
differential2 = CartesianDifferential.from_cartesian(o_c)
assert np.all(representation_equal(differential2, differential))
differential3 = CartesianDifferential.from_cartesian(o_c, base=o_c)
assert np.all(representation_equal(differential3, differential))
s_off = s + 1.0 * u.pc * sf[d] * e[d]
assert_representation_allclose(o_c, s_off - s, atol=1e-10 * u.kpc)
s_off2 = s + differential
assert_representation_allclose(s_off2, s_off)
def test_init_failures(self):
with pytest.raises(ValueError):
CartesianDifferential(1.0 * u.kpc / u.s, 2.0 * u.kpc)
with pytest.raises(u.UnitsError):
CartesianDifferential(1.0 * u.kpc / u.s, 2.0 * u.kpc, 3.0 * u.kpc)
with pytest.raises(ValueError):
CartesianDifferential(1.0 * u.kpc, 2.0 * u.kpc, 3.0 * u.kpc, xyz_axis=1)
| TestCartesianDifferential |
python | getsentry__sentry | src/sentry/insights/endpoints/starred_segments.py | {
"start": 954,
"end": 3419
} | class ____(OrganizationEndpoint):
publish_status = {
"POST": ApiPublishStatus.EXPERIMENTAL,
"DELETE": ApiPublishStatus.EXPERIMENTAL,
}
owner = ApiOwner.DATA_BROWSING
permission_classes = (MemberPermission,)
def has_feature(self, organization, request):
return features.has(
"organizations:insights-modules-use-eap", organization, actor=request.user
)
def post(self, request: Request, organization: Organization) -> Response:
"""
Star a segment for the current organization member.
"""
if not self.has_feature(organization, request):
return self.respond(status=404)
serializer = StarSegmentSerializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
segment_name = serializer.validated_data["segment_name"]
project_id = serializer.validated_data["project_id"]
with atomic_transaction(using=router.db_for_write(InsightsStarredSegment)):
_, created = InsightsStarredSegment.objects.get_or_create(
organization=organization,
project_id=project_id,
user_id=request.user.id,
segment_name=segment_name,
)
if not created:
return Response(status=status.HTTP_403_FORBIDDEN)
return Response(status=status.HTTP_200_OK)
def delete(self, request: Request, organization: Organization) -> Response:
"""
Delete a starred segment for the current organization member.
"""
if not request.user.is_authenticated:
return Response(status=status.HTTP_400_BAD_REQUEST)
if not self.has_feature(organization, request):
return self.respond(status=404)
serializer = StarSegmentSerializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
segment_name = serializer.validated_data["segment_name"]
project_id = serializer.validated_data["project_id"]
InsightsStarredSegment.objects.filter(
organization=organization,
user_id=request.user.id,
project_id=project_id,
segment_name=segment_name,
).delete()
return Response(status=status.HTTP_200_OK)
| InsightsStarredSegmentsEndpoint |
python | ray-project__ray | python/ray/_private/runtime_env/uv.py | {
"start": 7742,
"end": 12336
} | class ____(RuntimeEnvPlugin):
name = "uv"
def __init__(self, resources_dir: str):
self._uv_resource_dir = os.path.join(resources_dir, "uv")
self._creating_task = {}
# Maps a URI to a lock that is used to prevent multiple concurrent
# installs of the same virtualenv, see #24513
self._create_locks: Dict[str, asyncio.Lock] = {}
# Key: created hashes. Value: size of the uv dir.
self._created_hash_bytes: Dict[str, int] = {}
try_to_create_directory(self._uv_resource_dir)
def _get_path_from_hash(self, hash_val: str) -> str:
"""Generate a path from the hash of a uv spec.
Example output:
/tmp/ray/session_2021-11-03_16-33-59_356303_41018/runtime_resources
/uv/ray-9a7972c3a75f55e976e620484f58410c920db091
"""
return os.path.join(self._uv_resource_dir, hash_val)
def get_uris(self, runtime_env: "RuntimeEnv") -> List[str]: # noqa: F821
"""Return the uv URI from the RuntimeEnv if it exists, else return []."""
uv_uri = runtime_env.uv_uri()
if uv_uri:
return [uv_uri]
return []
def delete_uri(
self, uri: str, logger: Optional[logging.Logger] = default_logger
) -> int:
"""Delete URI and return the number of bytes deleted."""
logger.info("Got request to delete uv URI %s", uri)
protocol, hash_val = parse_uri(uri)
if protocol != Protocol.UV:
raise ValueError(
"UvPlugin can only delete URIs with protocol "
f"uv. Received protocol {protocol}, URI {uri}"
)
# Cancel running create task.
task = self._creating_task.pop(hash_val, None)
if task is not None:
task.cancel()
del self._created_hash_bytes[hash_val]
uv_env_path = self._get_path_from_hash(hash_val)
local_dir_size = get_directory_size_bytes(uv_env_path)
del self._create_locks[uri]
try:
shutil.rmtree(uv_env_path)
except OSError as e:
logger.warning(f"Error when deleting uv env {uv_env_path}: {str(e)}")
return 0
return local_dir_size
async def create(
self,
uri: str,
runtime_env: "RuntimeEnv", # noqa: F821
context: "RuntimeEnvContext", # noqa: F821
logger: Optional[logging.Logger] = default_logger,
) -> int:
if not runtime_env.has_uv():
return 0
protocol, hash_val = parse_uri(uri)
target_dir = self._get_path_from_hash(hash_val)
async def _create_for_hash():
await UvProcessor(
target_dir,
runtime_env,
logger,
)
loop = get_running_loop()
return await loop.run_in_executor(
None, get_directory_size_bytes, target_dir
)
if uri not in self._create_locks:
# async lock to prevent the same virtualenv being concurrently installed
self._create_locks[uri] = asyncio.Lock()
async with self._create_locks[uri]:
if hash_val in self._created_hash_bytes:
return self._created_hash_bytes[hash_val]
self._creating_task[hash_val] = task = create_task(_create_for_hash())
task.add_done_callback(lambda _: self._creating_task.pop(hash_val, None))
uv_dir_bytes = await task
self._created_hash_bytes[hash_val] = uv_dir_bytes
return uv_dir_bytes
def modify_context(
self,
uris: List[str],
runtime_env: "RuntimeEnv", # noqa: F821
context: "RuntimeEnvContext", # noqa: F821
logger: logging.Logger = default_logger,
):
if not runtime_env.has_uv():
return
# UvPlugin only uses a single URI.
uri = uris[0]
# Update py_executable.
protocol, hash_val = parse_uri(uri)
target_dir = self._get_path_from_hash(hash_val)
virtualenv_python = virtualenv_utils.get_virtualenv_python(target_dir)
if not os.path.exists(virtualenv_python):
raise ValueError(
f"Local directory {target_dir} for URI {uri} does "
"not exist on the cluster. Something may have gone wrong while "
"installing the runtime_env `uv` packages."
)
context.py_executable = virtualenv_python
context.command_prefix += virtualenv_utils.get_virtualenv_activate_command(
target_dir
)
| UvPlugin |
python | jazzband__django-formtools | tests/wizard/wizardtests/tests.py | {
"start": 10670,
"end": 11679
} | class ____(WizardTests, TestCase):
wizard_url = '/wiz_session/'
wizard_step_1_data = {
'session_contact_wizard-current_step': 'form1',
}
wizard_step_data = (
{
'form1-name': 'Pony',
'form1-thirsty': '2',
'session_contact_wizard-current_step': 'form1',
},
{
'form2-address1': '123 Main St',
'form2-address2': 'Djangoland',
'session_contact_wizard-current_step': 'form2',
},
{
'form3-random_crap': 'blah blah',
'session_contact_wizard-current_step': 'form3',
},
{
'form4-INITIAL_FORMS': '0',
'form4-TOTAL_FORMS': '2',
'form4-MAX_NUM_FORMS': '0',
'form4-0-random_crap': 'blah blah',
'form4-1-random_crap': 'blah blah',
'session_contact_wizard-current_step': 'form4',
}
)
@override_settings(ROOT_URLCONF='tests.wizard.wizardtests.urls')
| SessionWizardTests |
python | wandb__wandb | wandb/filesync/step_checksum.py | {
"start": 813,
"end": 992
} | class ____(NamedTuple):
artifact_id: str
finalize: bool
before_commit: step_upload.PreCommitFn
result_future: "concurrent.futures.Future[None]"
| RequestCommitArtifact |
python | pypa__pipenv | pipenv/vendor/tomlkit/exceptions.py | {
"start": 3869,
"end": 4200
} | class ____(ParseError):
"""
An error that indicates a bug in the parser.
"""
def __init__(self, line: int, col: int, message: str | None = None) -> None:
msg = "Internal parser error"
if message:
msg += f" ({message})"
super().__init__(line, col, message=msg)
| InternalParserError |
python | astropy__astropy | astropy/wcs/wcsapi/tests/test_high_level_api.py | {
"start": 3634,
"end": 5169
} | class ____(DoubleLowLevelWCS, HighLevelWCSMixin):
"""
WCS with serialized classes
"""
@property
def serialized_classes(self):
return True
@property
def pixel_n_dim(self):
return 2
@property
def world_n_dim(self):
return 2
@property
def world_axis_physical_types(self):
return ["pos.eq.ra", "pos.eq.dec"]
@property
def world_axis_units(self):
return ["deg", "deg"]
@property
def world_axis_object_components(self):
return [("test", 0, "value")]
@property
def world_axis_object_classes(self):
return {
"test": (
"astropy.units.Quantity",
(),
{"unit": ("astropy.units.Unit", ("deg",), {})},
)
}
def test_serialized_classes():
wcs = SerializedWCS()
q = wcs.pixel_to_world(1)
assert isinstance(q, Quantity)
x = wcs.world_to_pixel(q)
assert_allclose(x, 1)
def test_objects_to_values():
wcs = SkyCoordDuplicateWCS()
c1, c2 = wcs.pixel_to_world(1, 2, 3, 4)
values = high_level_objects_to_values(c1, c2, low_level_wcs=wcs)
assert np.allclose(values, [2, 4, 6, 8])
def test_values_to_objects():
wcs = SkyCoordDuplicateWCS()
c1, c2 = wcs.pixel_to_world(1, 2, 3, 4)
c1_out, c2_out = values_to_high_level_objects(*[2, 4, 6, 8], low_level_wcs=wcs)
assert c1.ra == c1_out.ra
assert c2.l == c2_out.l
assert c1.dec == c1_out.dec
assert c2.b == c2_out.b
| SerializedWCS |
python | PyCQA__pylint | tests/functional/r/regression_02/regression_8207.py | {
"start": 93,
"end": 354
} | class ____:
def __init__(self):
self.offset = -10
def minus_offset(self):
return {
(x, x): value
for x, row in enumerate([(5, 10), (20, 30)])
for y, value in enumerate(row, -self.offset)
}
| Example |
python | scrapy__scrapy | scrapy/core/scraper.py | {
"start": 3297,
"end": 20557
} | class ____:
def __init__(self, crawler: Crawler) -> None:
self.slot: Slot | None = None
self.spidermw: SpiderMiddlewareManager = SpiderMiddlewareManager.from_crawler(
crawler
)
itemproc_cls: type[ItemPipelineManager] = load_object(
crawler.settings["ITEM_PROCESSOR"]
)
self.itemproc: ItemPipelineManager = itemproc_cls.from_crawler(crawler)
self._itemproc_has_async: dict[str, bool] = {}
for method in [
"open_spider",
"close_spider",
"process_item",
]:
self._check_deprecated_itemproc_method(method)
self.concurrent_items: int = crawler.settings.getint("CONCURRENT_ITEMS")
self.crawler: Crawler = crawler
self.signals: SignalManager = crawler.signals
assert crawler.logformatter
self.logformatter: LogFormatter = crawler.logformatter
def _check_deprecated_itemproc_method(self, method: str) -> None:
itemproc_cls = type(self.itemproc)
if not hasattr(self.itemproc, "process_item_async"):
warnings.warn(
f"{global_object_name(itemproc_cls)} doesn't define a {method}_async() method,"
f" this is deprecated and the method will be required in future Scrapy versions.",
ScrapyDeprecationWarning,
stacklevel=2,
)
self._itemproc_has_async[method] = False
elif (
issubclass(itemproc_cls, ItemPipelineManager)
and method_is_overridden(itemproc_cls, ItemPipelineManager, method)
and not method_is_overridden(
itemproc_cls, ItemPipelineManager, f"{method}_async"
)
):
warnings.warn(
f"{global_object_name(itemproc_cls)} overrides {method}() but doesn't override {method}_async()."
f" This is deprecated. {method}() will be used, but in future Scrapy versions {method}_async() will be used instead.",
ScrapyDeprecationWarning,
stacklevel=2,
)
self._itemproc_has_async[method] = False
else:
self._itemproc_has_async[method] = True
def open_spider(self, spider: Spider | None = None) -> Deferred[None]:
warnings.warn(
"Scraper.open_spider() is deprecated, use open_spider_async() instead",
ScrapyDeprecationWarning,
stacklevel=2,
)
return deferred_from_coro(self.open_spider_async())
async def open_spider_async(self) -> None:
"""Open the spider for scraping and allocate resources for it.
.. versionadded:: VERSION
"""
self.slot = Slot(self.crawler.settings.getint("SCRAPER_SLOT_MAX_ACTIVE_SIZE"))
if not self.crawler.spider:
raise RuntimeError(
"Scraper.open_spider() called before Crawler.spider is set."
)
if self._itemproc_has_async["open_spider"]:
await self.itemproc.open_spider_async()
else:
await maybe_deferred_to_future(
self.itemproc.open_spider(self.crawler.spider)
)
def close_spider(self, spider: Spider | None = None) -> Deferred[None]:
warnings.warn(
"Scraper.close_spider() is deprecated, use close_spider_async() instead",
ScrapyDeprecationWarning,
stacklevel=2,
)
return deferred_from_coro(self.close_spider_async())
async def close_spider_async(self) -> None:
"""Close the spider being scraped and release its resources.
.. versionadded:: VERSION
"""
if self.slot is None:
raise RuntimeError("Scraper slot not assigned")
self.slot.closing = Deferred()
self._check_if_closing()
await maybe_deferred_to_future(self.slot.closing)
if self._itemproc_has_async["close_spider"]:
await self.itemproc.close_spider_async()
else:
assert self.crawler.spider
await maybe_deferred_to_future(
self.itemproc.close_spider(self.crawler.spider)
)
def is_idle(self) -> bool:
"""Return True if there isn't any more spiders to process"""
return not self.slot
def _check_if_closing(self) -> None:
assert self.slot is not None # typing
if self.slot.closing and self.slot.is_idle():
assert self.crawler.spider
self.slot.closing.callback(self.crawler.spider)
@inlineCallbacks
@_warn_spider_arg
def enqueue_scrape(
self, result: Response | Failure, request: Request, spider: Spider | None = None
) -> Generator[Deferred[Any], Any, None]:
if self.slot is None:
raise RuntimeError("Scraper slot not assigned")
dfd = self.slot.add_response_request(result, request)
self._scrape_next()
try:
yield dfd # fired in _wait_for_processing()
except Exception:
logger.error(
"Scraper bug processing %(request)s",
{"request": request},
exc_info=True,
extra={"spider": self.crawler.spider},
)
finally:
self.slot.finish_response(result, request)
self._check_if_closing()
self._scrape_next()
def _scrape_next(self) -> None:
assert self.slot is not None # typing
while self.slot.queue:
result, request, queue_dfd = self.slot.next_response_request_deferred()
_schedule_coro(self._wait_for_processing(result, request, queue_dfd))
async def _scrape(self, result: Response | Failure, request: Request) -> None:
"""Handle the downloaded response or failure through the spider callback/errback."""
if not isinstance(result, (Response, Failure)):
raise TypeError(
f"Incorrect type: expected Response or Failure, got {type(result)}: {result!r}"
)
output: Iterable[Any] | AsyncIterator[Any]
if isinstance(result, Response):
try:
# call the spider middlewares and the request callback with the response
output = await self.spidermw.scrape_response_async(
self.call_spider_async, result, request
)
except Exception:
self.handle_spider_error(Failure(), request, result)
else:
await self.handle_spider_output_async(output, request, result)
return
try:
# call the request errback with the downloader error
output = await self.call_spider_async(result, request)
except Exception as spider_exc:
# the errback didn't silence the exception
assert self.crawler.spider
if not result.check(IgnoreRequest):
logkws = self.logformatter.download_error(
result, request, self.crawler.spider
)
logger.log(
*logformatter_adapter(logkws),
extra={"spider": self.crawler.spider},
exc_info=failure_to_exc_info(result),
)
if spider_exc is not result.value:
# the errback raised a different exception, handle it
self.handle_spider_error(Failure(), request, result)
else:
await self.handle_spider_output_async(output, request, result)
async def _wait_for_processing(
self, result: Response | Failure, request: Request, queue_dfd: Deferred[None]
) -> None:
try:
await self._scrape(result, request)
except Exception:
queue_dfd.errback(Failure())
else:
queue_dfd.callback(None) # awaited in enqueue_scrape()
def call_spider(
    self, result: Response | Failure, request: Request, spider: Spider | None = None
) -> Deferred[Iterable[Any] | AsyncIterator[Any]]:
    """Deprecated Deferred-based wrapper around :meth:`call_spider_async`."""
    warnings.warn(
        "Scraper.call_spider() is deprecated, use call_spider_async() instead",
        ScrapyDeprecationWarning,
        stacklevel=2,
    )
    coro = self.call_spider_async(result, request)
    return deferred_from_coro(coro)
async def call_spider_async(
    self, result: Response | Failure, request: Request
) -> Iterable[Any] | AsyncIterator[Any]:
    """Call the request callback or errback with the response or failure.

    Returns the spider output normalized through ``iterate_spider_output()``.

    .. versionadded:: 2.13
    """
    # yield control back to the reactor/event loop before entering spider code
    await _defer_sleep_async()
    assert self.crawler.spider
    if isinstance(result, Response):
        # successful download: route through the request callback
        if getattr(result, "request", None) is None:
            # attach the originating request if the response lacks one
            result.request = request
        assert result.request
        callback = result.request.callback or self.crawler.spider._parse
        warn_on_generator_with_return_value(self.crawler.spider, callback)
        output = callback(result, **result.request.cb_kwargs)
    else:  # result is a Failure
        # TODO: properly type adding this attribute to a Failure
        result.request = request  # type: ignore[attr-defined]
        if not request.errback:
            # no errback: re-raise the wrapped exception to the caller
            result.raiseException()
        warn_on_generator_with_return_value(self.crawler.spider, request.errback)
        output = request.errback(result)
        if isinstance(output, Failure):
            # the errback returned (rather than raised) a Failure: propagate it
            output.raiseException()
        # else the errback returned actual output (like a callback),
        # which needs to be passed to iterate_spider_output()
    return await ensure_awaitable(iterate_spider_output(output))
@_warn_spider_arg
def handle_spider_error(
    self,
    _failure: Failure,
    request: Request,
    response: Response | Failure,
    spider: Spider | None = None,
) -> None:
    """Handle an exception raised by a spider callback or errback."""
    assert self.crawler.spider
    exc = _failure.value
    # CloseSpider is a control-flow exception: shut the engine down, don't log.
    if isinstance(exc, CloseSpider):
        assert self.crawler.engine is not None  # typing
        reason = exc.reason or "cancelled"
        _schedule_coro(self.crawler.engine.close_spider_async(reason=reason))
        return
    running_spider = self.crawler.spider
    logkws = self.logformatter.spider_error(_failure, request, response, running_spider)
    logger.log(
        *logformatter_adapter(logkws),
        exc_info=failure_to_exc_info(_failure),
        extra={"spider": running_spider},
    )
    self.signals.send_catch_log(
        signal=signals.spider_error,
        failure=_failure,
        response=response,
        spider=running_spider,
    )
    assert self.crawler.stats
    self.crawler.stats.inc_value("spider_exceptions/count")
    self.crawler.stats.inc_value(f"spider_exceptions/{exc.__class__.__name__}")
def handle_spider_output(
    self,
    result: Iterable[_T] | AsyncIterator[_T],
    request: Request,
    response: Response | Failure,
    spider: Spider | None = None,
) -> Deferred[None]:
    """Deprecated Deferred-based wrapper around :meth:`handle_spider_output_async`."""
    warnings.warn(
        "Scraper.handle_spider_output() is deprecated, use handle_spider_output_async() instead",
        ScrapyDeprecationWarning,
        stacklevel=2,
    )
    coro = self.handle_spider_output_async(result, request, response)
    return deferred_from_coro(coro)
async def handle_spider_output_async(
    self,
    result: Iterable[_T] | AsyncIterator[_T],
    request: Request,
    response: Response | Failure,
) -> None:
    """Pass items/requests produced by a callback to ``_process_spidermw_output()`` in parallel.

    .. versionadded:: 2.13
    """
    # Wrap the spider output so that per-item errors are routed through
    # handle_spider_error() instead of aborting the whole iteration.
    it: Iterable[_T] | AsyncIterator[_T]
    if isinstance(result, AsyncIterator):
        it = aiter_errback(result, self.handle_spider_error, request, response)
    else:
        it = iter_errback(result, self.handle_spider_error, request, response)
    if is_asyncio_available():
        # asyncio reactor: a single parallel helper handles both iterator kinds
        await _parallel_asyncio(
            it, self.concurrent_items, self._process_spidermw_output_async, response
        )
    elif isinstance(result, AsyncIterator):
        await maybe_deferred_to_future(
            parallel_async(
                it,
                self.concurrent_items,
                self._process_spidermw_output,
                response,
            )
        )
    else:
        await maybe_deferred_to_future(
            parallel(
                it,
                self.concurrent_items,
                self._process_spidermw_output,
                response,
            )
        )
def _process_spidermw_output(
    self, output: Any, response: Response | Failure
) -> Deferred[None]:
    """Process each Request/Item (given in the output parameter) returned
    from the given spider.

    Items are sent to the item pipelines, requests are scheduled.
    """
    coro = self._process_spidermw_output_async(output, response)
    return deferred_from_coro(coro)
async def _process_spidermw_output_async(
    self, output: Any, response: Response | Failure
) -> None:
    """Process a single Request/Item returned from the given spider.

    Requests are handed to the engine for scheduling; any other non-``None``
    value is sent through the item pipelines.
    """
    if isinstance(output, Request):
        assert self.crawler.engine is not None  # typing
        self.crawler.engine.crawl(request=output)
    elif output is not None:
        await self.start_itemproc_async(output, response=response)
def start_itemproc(
    self, item: Any, *, response: Response | Failure | None
) -> Deferred[None]:
    """Deprecated Deferred-based wrapper around :meth:`start_itemproc_async`.

    *response* is the source of the item data. If the item does not come
    from response data, e.g. it was hard-coded, set it to ``None``.
    """
    warnings.warn(
        "Scraper.start_itemproc() is deprecated, use start_itemproc_async() instead",
        ScrapyDeprecationWarning,
        stacklevel=2,
    )
    coro = self.start_itemproc_async(item, response=response)
    return deferred_from_coro(coro)
async def start_itemproc_async(
    self, item: Any, *, response: Response | Failure | None
) -> None:
    """Send *item* to the item pipelines for processing.

    *response* is the source of the item data. If the item does not come
    from response data, e.g. it was hard-coded, set it to ``None``.

    .. versionadded:: VERSION
    """
    assert self.slot is not None  # typing
    assert self.crawler.spider is not None  # typing
    # track in-flight pipeline work so the slot can tell when it is idle
    self.slot.itemproc_size += 1
    try:
        if self._itemproc_has_async["process_item"]:
            output = await self.itemproc.process_item_async(item)
        else:
            # legacy pipeline API: Deferred-returning, takes the spider argument
            output = await maybe_deferred_to_future(
                self.itemproc.process_item(item, self.crawler.spider)
            )
    except DropItem as ex:
        # a pipeline deliberately dropped the item: log and signal, not an error
        logkws = self.logformatter.dropped(item, ex, response, self.crawler.spider)
        if logkws is not None:
            logger.log(
                *logformatter_adapter(logkws), extra={"spider": self.crawler.spider}
            )
        await self.signals.send_catch_log_async(
            signal=signals.item_dropped,
            item=item,
            response=response,
            spider=self.crawler.spider,
            exception=ex,
        )
    except Exception as ex:
        # unexpected pipeline failure: log with traceback and signal item_error
        logkws = self.logformatter.item_error(
            item, ex, response, self.crawler.spider
        )
        logger.log(
            *logformatter_adapter(logkws),
            extra={"spider": self.crawler.spider},
            exc_info=True,
        )
        await self.signals.send_catch_log_async(
            signal=signals.item_error,
            item=item,
            response=response,
            spider=self.crawler.spider,
            failure=Failure(),
        )
    else:
        # item passed every pipeline stage: log and signal item_scraped
        logkws = self.logformatter.scraped(output, response, self.crawler.spider)
        if logkws is not None:
            logger.log(
                *logformatter_adapter(logkws), extra={"spider": self.crawler.spider}
            )
        await self.signals.send_catch_log_async(
            signal=signals.item_scraped,
            item=output,
            response=response,
            spider=self.crawler.spider,
        )
    finally:
        self.slot.itemproc_size -= 1
| Scraper |
python | sympy__sympy | sympy/plotting/pygletplot/plot_object.py | {
"start": 0,
"end": 330
} | class ____:
"""
Base class for objects which can be displayed in
a Plot.
"""
visible = True
def _draw(self):
if self.visible:
self.draw()
def draw(self):
"""
OpenGL rendering code for the plot object.
Override in base class.
"""
pass
| PlotObject |
python | graphql-python__graphene | graphene/relay/tests/test_node_custom.py | {
"start": 785,
"end": 1039
} | class ____(ObjectType):
class Meta:
interfaces = [CustomNode, BasePhoto]
user_data = {"1": User(id="1", name="John Doe"), "2": User(id="2", name="Jane Smith")}
photo_data = {"3": Photo(id="3", width=300), "4": Photo(id="4", width=400)}
| Photo |
python | kamyu104__LeetCode-Solutions | Python/avoid-flood-in-the-city.py | {
"start": 67,
"end": 907
} | class ____(object):
def avoidFlood(self, rains):
"""
:type rains: List[int]
:rtype: List[int]
"""
lookup = collections.defaultdict(list)
i = len(rains)-1
for lake in reversed(rains):
lookup[lake].append(i)
i -= 1
result, min_heap = [], []
for i, lake in enumerate(rains):
if lake:
if len(lookup[lake]) >= 2:
lookup[lake].pop()
heapq.heappush(min_heap, lookup[lake][-1])
result.append(-1)
elif min_heap:
j = heapq.heappop(min_heap)
if j < i:
return []
result.append(rains[j])
else:
result.append(1)
return result if not min_heap else []
| Solution |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 61945,
"end": 63178
} | class ____:
@pytest.mark.parametrize("rvs_loc", [1e-5, 1e10])
@pytest.mark.parametrize("rvs_scale", [1e-2, 1e8])
@pytest.mark.parametrize('fix_loc', [True, False])
@pytest.mark.parametrize('fix_scale', [True, False])
def test_fit_MLE_comp_optimizer(self, rvs_loc, rvs_scale,
fix_loc, fix_scale):
rng = np.random.default_rng(6762668991392531563)
data = stats.halfnorm.rvs(loc=rvs_loc, scale=rvs_scale, size=1000,
random_state=rng)
if fix_loc and fix_scale:
error_msg = ("All parameters fixed. There is nothing to "
"optimize.")
with pytest.raises(RuntimeError, match=error_msg):
stats.halfcauchy.fit(data, floc=rvs_loc, fscale=rvs_scale)
return
kwds = {}
if fix_loc:
kwds['floc'] = rvs_loc
if fix_scale:
kwds['fscale'] = rvs_scale
_assert_less_or_close_loglike(stats.halfcauchy, data, **kwds)
def test_fit_error(self):
# `floc` bigger than the minimal data point
with pytest.raises(FitDataError):
stats.halfcauchy.fit([1, 2, 3], floc=2)
| TestHalfCauchy |
python | doocs__leetcode | solution/3100-3199/3152.Special Array II/Solution.py | {
"start": 0,
"end": 308
} | class ____:
def isArraySpecial(self, nums: List[int], queries: List[List[int]]) -> List[bool]:
n = len(nums)
d = list(range(n))
for i in range(1, n):
if nums[i] % 2 != nums[i - 1] % 2:
d[i] = d[i - 1]
return [d[t] <= f for f, t in queries]
| Solution |
python | ray-project__ray | python/ray/serve/_private/application_state.py | {
"start": 3959,
"end": 6174
} | class ____:
app_status: ApplicationStatusInfo
name: str = ""
deployment_statuses: List[DeploymentStatusInfo] = field(default_factory=list)
def debug_string(self):
return json.dumps(asdict(self), indent=4)
def get_deployment_status(self, name: str) -> Optional[DeploymentStatusInfo]:
"""Get a deployment's status by name.
Args:
name: Deployment's name.
Returns:
Optional[DeploymentStatusInfo]: The status of the deployment if it exists,
otherwise None.
"""
for deployment_status in self.deployment_statuses:
if name == deployment_status.name:
return deployment_status
return None
def to_proto(self):
# Create a protobuf for the Serve Application info
app_status_proto = self.app_status.to_proto()
# Create protobufs for all individual deployment statuses
deployment_status_protos = map(
lambda status: status.to_proto(), self.deployment_statuses
)
# Create a protobuf list containing all the deployment status protobufs
deployment_status_proto_list = DeploymentStatusInfoListProto()
deployment_status_proto_list.deployment_status_infos.extend(
deployment_status_protos
)
# Return protobuf encapsulating application and deployment protos
return StatusOverviewProto(
name=self.name,
app_status=app_status_proto,
deployment_statuses=deployment_status_proto_list,
)
@classmethod
def from_proto(cls, proto: StatusOverviewProto) -> "StatusOverview":
# Recreate Serve Application info
app_status = ApplicationStatusInfo.from_proto(proto.app_status)
# Recreate deployment statuses
deployment_statuses = []
for info_proto in proto.deployment_statuses.deployment_status_infos:
deployment_statuses.append(DeploymentStatusInfo.from_proto(info_proto))
# Recreate StatusInfo
return cls(
app_status=app_status,
deployment_statuses=deployment_statuses,
name=proto.name,
)
@dataclass
| StatusOverview |
python | falconry__falcon | tests/asgi/test_hello_asgi.py | {
"start": 4624,
"end": 4758
} | class ____:
def on_get(self, req, resp):
pass
async def on_post(self, req, resp):
pass
| PartialCoroutineResource |
python | donnemartin__interactive-coding-challenges | graphs_trees/utils/results.py | {
"start": 0,
"end": 406
} | class ____(object):
def __init__(self):
self.results = []
def add_result(self, result):
# TODO: Clean this up
# Simplifies challenge coding and unit testing
# but makes this function look less appealing
self.results.append(int(str(result)))
def clear_results(self):
self.results = []
def __str__(self):
return str(self.results) | Results |
python | ray-project__ray | python/ray/_private/prometheus_exporter.py | {
"start": 2001,
"end": 9389
} | class ____(object):
"""Collector represents the Prometheus Collector object"""
def __init__(self, options=Options(), view_name_to_data_map=None):
if view_name_to_data_map is None:
view_name_to_data_map = {}
self._options = options
self._registry = options.registry
self._view_name_to_data_map = view_name_to_data_map
self._registered_views = {}
@property
def options(self):
"""Options to be used to configure the exporter"""
return self._options
@property
def registry(self):
"""Prometheus Collector Registry instance"""
return self._registry
@property
def view_name_to_data_map(self):
"""Map with all view data objects
that will be sent to Prometheus
"""
return self._view_name_to_data_map
@property
def registered_views(self):
"""Map with all registered views"""
return self._registered_views
def register_view(self, view):
"""register_view will create the needed structure
in order to be able to sent all data to Prometheus
"""
v_name = get_view_name(self.options.namespace, view)
if v_name not in self.registered_views:
desc = {
"name": v_name,
"documentation": view.description,
"labels": list(map(sanitize, view.columns)),
"units": view.measure.unit,
}
self.registered_views[v_name] = desc
def add_view_data(self, view_data):
"""Add view data object to be sent to server"""
self.register_view(view_data.view)
v_name = get_view_name(self.options.namespace, view_data.view)
self.view_name_to_data_map[v_name] = view_data
# TODO: add start and end timestamp
def to_metric(self, desc, tag_values, agg_data, metrics_map):
"""to_metric translate the data that OpenCensus create
to Prometheus format, using Prometheus Metric object
:type desc: dict
:param desc: The map that describes view definition
:type tag_values: tuple of :class:
`~opencensus.tags.tag_value.TagValue`
:param object of opencensus.tags.tag_value.TagValue:
TagValue object used as label values
:type agg_data: object of :class:
`~opencensus.stats.aggregation_data.AggregationData`
:param object of opencensus.stats.aggregation_data.AggregationData:
Aggregated data that needs to be converted as Prometheus samples
:rtype: :class:`~prometheus_client.core.CounterMetricFamily` or
:class:`~prometheus_client.core.HistogramMetricFamily` or
:class:`~prometheus_client.core.UnknownMetricFamily` or
:class:`~prometheus_client.core.GaugeMetricFamily`
"""
metric_name = desc["name"]
metric_description = desc["documentation"]
label_keys = desc["labels"]
metric_units = desc["units"]
assert len(tag_values) == len(label_keys), (tag_values, label_keys)
# Prometheus requires that all tag values be strings hence
# the need to cast none to the empty string before exporting. See
# https://github.com/census-instrumentation/opencensus-python/issues/480
tag_values = [tv if tv else "" for tv in tag_values]
if isinstance(agg_data, aggregation_data_module.CountAggregationData):
metric = metrics_map.get(metric_name)
if not metric:
metric = CounterMetricFamily(
name=metric_name,
documentation=metric_description,
unit=metric_units,
labels=label_keys,
)
metrics_map[metric_name] = metric
metric.add_metric(labels=tag_values, value=agg_data.count_data)
return
elif isinstance(agg_data, aggregation_data_module.DistributionAggregationData):
assert agg_data.bounds == sorted(agg_data.bounds)
# buckets are a list of buckets. Each bucket is another list with
# a pair of bucket name and value, or a triple of bucket name,
# value, and exemplar. buckets need to be in order.
buckets = []
cum_count = 0 # Prometheus buckets expect cumulative count.
for ii, bound in enumerate(agg_data.bounds):
cum_count += agg_data.counts_per_bucket[ii]
bucket = [str(bound), cum_count]
buckets.append(bucket)
# Prometheus requires buckets to be sorted, and +Inf present.
# In OpenCensus we don't have +Inf in the bucket bonds so need to
# append it here.
buckets.append(["+Inf", agg_data.count_data])
metric = metrics_map.get(metric_name)
if not metric:
metric = HistogramMetricFamily(
name=metric_name,
documentation=metric_description,
labels=label_keys,
)
metrics_map[metric_name] = metric
metric.add_metric(
labels=tag_values,
buckets=buckets,
sum_value=agg_data.sum,
)
return
elif isinstance(agg_data, aggregation_data_module.SumAggregationData):
metric = metrics_map.get(metric_name)
if not metric:
metric = UnknownMetricFamily(
name=metric_name,
documentation=metric_description,
labels=label_keys,
)
metrics_map[metric_name] = metric
metric.add_metric(labels=tag_values, value=agg_data.sum_data)
return
elif isinstance(agg_data, aggregation_data_module.LastValueAggregationData):
metric = metrics_map.get(metric_name)
if not metric:
metric = GaugeMetricFamily(
name=metric_name,
documentation=metric_description,
labels=label_keys,
)
metrics_map[metric_name] = metric
metric.add_metric(labels=tag_values, value=agg_data.value)
return
else:
raise ValueError(f"unsupported aggregation type {type(agg_data)}")
def collect(self): # pragma: NO COVER
"""Collect fetches the statistics from OpenCensus
and delivers them as Prometheus Metrics.
Collect is invoked every time a prometheus.Gatherer is run
for example when the HTTP endpoint is invoked by Prometheus.
"""
# Make a shallow copy of self._view_name_to_data_map, to avoid seeing
# concurrent modifications when iterating through the dictionary.
metrics_map = {}
for v_name, view_data in self._view_name_to_data_map.copy().items():
if v_name not in self.registered_views:
continue
desc = self.registered_views[v_name]
for tag_values in view_data.tag_value_aggregation_data_map:
agg_data = view_data.tag_value_aggregation_data_map[tag_values]
self.to_metric(desc, tag_values, agg_data, metrics_map)
for metric in metrics_map.values():
yield metric
| Collector |
python | python-openxml__python-docx | tests/unitutil/cxml.py | {
"start": 1378,
"end": 8126
} | class ____:
"""
Represents an XML element, having a namespace, tagname, attributes, and
may contain either text or children (but not both) or may be empty.
"""
def __init__(self, tagname, attrs, text):
self._tagname = tagname
self._attrs = attrs
self._text = text
self._children = []
self._is_root = False
def __repr__(self):
"""
Provide a more meaningful repr value for an Element object, one that
displays the tagname as a simple empty element, e.g. ``<w:pPr/>``.
"""
return "<%s/>" % self._tagname
def connect_children(self, child_node_list):
"""
Make each of the elements appearing in `child_node_list` a child of
this element.
"""
for node in child_node_list:
child = node.element
self._children.append(child)
@classmethod
def from_token(cls, token):
"""
Return an ``Element`` object constructed from a parser element token.
"""
tagname = token.tagname
attrs = [tuple(a) for a in token.attr_list]
text = token.text
return cls(tagname, attrs, text)
@property
def is_root(self):
"""
|True| if this element is the root of the tree and should include the
namespace prefixes. |False| otherwise.
"""
return self._is_root
@is_root.setter
def is_root(self, value):
self._is_root = bool(value)
@property
def local_nspfxs(self):
"""
The namespace prefixes local to this element, both on the tagname and
all of its attributes. An empty string (``''``) is used to represent
the default namespace for an element tag having no prefix.
"""
def nspfx(name, is_element=False):
idx = name.find(":")
if idx == -1:
return "" if is_element else None
return name[:idx]
nspfxs = [nspfx(self._tagname, True)]
for name, val in self._attrs:
pfx = nspfx(name)
if pfx is None or pfx in nspfxs or pfx == "xml":
continue
nspfxs.append(pfx)
return nspfxs
@property
def nspfxs(self):
"""
A sequence containing each of the namespace prefixes appearing in
this tree. Each prefix appears once and only once, and in document
order.
"""
def merge(seq, seq_2):
for item in seq_2:
if item in seq:
continue
seq.append(item)
nspfxs = self.local_nspfxs
for child in self._children:
merge(nspfxs, child.nspfxs)
return nspfxs
@property
def xml(self):
"""
The XML corresponding to the tree rooted at this element,
pretty-printed using 2-spaces indentation at each level and with
a trailing '\n'.
"""
return self._xml(indent=0)
def _xml(self, indent):
"""
Return a string containing the XML of this element and all its
children with a starting indent of `indent` spaces.
"""
self._indent_str = " " * indent
xml = self._start_tag
for child in self._children:
xml += child._xml(indent + 2)
xml += self._end_tag
return xml
@property
def _start_tag(self):
"""
The text of the opening tag of this element, including attributes. If
this is the root element, a namespace declaration for each of the
namespace prefixes that occur in this tree is added in front of any
attributes. If this element contains text, that text follows the
start tag. If not, and this element has no children, an empty tag is
returned. Otherwise, an opening tag is returned, followed by
a newline. The tag is indented by this element's indent value in all
cases.
"""
_nsdecls = nsdecls(*self.nspfxs) if self.is_root else ""
tag = "%s<%s%s" % (self._indent_str, self._tagname, _nsdecls)
for attr in self._attrs:
name, value = attr
tag += ' %s="%s"' % (name, value)
if self._text:
tag += ">%s" % self._text
elif self._children:
tag += ">\n"
else:
tag += "/>\n"
return tag
@property
def _end_tag(self):
"""
The text of the closing tag of this element, if there is one. If the
element contains text, no leading indentation is included.
"""
if self._text:
tag = "</%s>\n" % self._tagname
elif self._children:
tag = "%s</%s>\n" % (self._indent_str, self._tagname)
else:
tag = ""
return tag
# ====================================================================
# parser
# ====================================================================
# parse actions ----------------------------------
def connect_node_children(s, loc, tokens):
node = tokens[0]
node.element.connect_children(node.child_node_list)
def connect_root_node_children(root_node):
root_node.element.connect_children(root_node.child_node_list)
root_node.element.is_root = True
def grammar():
# terminals ----------------------------------
colon = Literal(":")
equal = Suppress("=")
slash = Suppress("/")
open_paren = Suppress("(")
close_paren = Suppress(")")
open_brace = Suppress("{")
close_brace = Suppress("}")
# np:tagName ---------------------------------
nspfx = Word(alphas)
local_name = Word(alphanums)
tagname = Combine(nspfx + colon + local_name)
# np:attr_name=attr_val ----------------------
attr_name = Word(alphas + ":")
attr_val = Word(alphanums + " %-./:_")
attr_def = Group(attr_name + equal + attr_val)
attr_list = open_brace + delimitedList(attr_def) + close_brace
text = dblQuotedString.setParseAction(removeQuotes)
# w:jc{val=right} ----------------------------
element = (
tagname("tagname")
+ Group(Optional(attr_list))("attr_list")
+ Optional(text, default="")("text")
).setParseAction(Element.from_token)
child_node_list = Forward()
node = Group(
element("element") + Group(Optional(slash + child_node_list))("child_node_list")
).setParseAction(connect_node_children)
child_node_list << (open_paren + delimitedList(node) + close_paren | node)
root_node = (
element("element") + Group(Optional(slash + child_node_list))("child_node_list") + stringEnd
).setParseAction(connect_root_node_children)
return root_node
root_node = grammar()
| Element |
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/transfers/test_oracle_to_azure_data_lake.py | {
"start": 1030,
"end": 4572
} | class ____:
mock_module_path = "airflow.providers.microsoft.azure.transfers.oracle_to_azure_data_lake"
def test_write_temp_file(self, tmp_path):
csv_path = tmp_path / "testfile.csv"
task_id = "some_test_id"
sql = "some_sql"
sql_params = {":p_data": "2018-01-01"}
oracle_conn_id = "oracle_conn_id"
azure_data_lake_conn_id = "azure_data_lake_conn_id"
azure_data_lake_path = "azure_data_lake_path"
delimiter = "|"
encoding = "utf-8"
cursor_description = [
("id", "<class 'oracledb.NUMBER'>", 39, None, 38, 0, 0),
("description", "<class 'oracledb.STRING'>", 60, 240, None, None, 1),
]
cursor_rows = [[1, "description 1"], [2, "description 2"]]
mock_cursor = MagicMock()
mock_cursor.description = cursor_description
mock_cursor.__iter__.return_value = cursor_rows
op = OracleToAzureDataLakeOperator(
task_id=task_id,
filename=csv_path.name,
oracle_conn_id=oracle_conn_id,
sql=sql,
sql_params=sql_params,
azure_data_lake_conn_id=azure_data_lake_conn_id,
azure_data_lake_path=azure_data_lake_path,
delimiter=delimiter,
encoding=encoding,
)
op._write_temp_file(mock_cursor, os.fspath(csv_path))
assert csv_path.exists()
with csv_path.open() as f:
csvr = csv.reader(f, delimiter=delimiter)
header = next(csvr)
assert header[0] == "id"
assert header[1] == "description"
for csv_rec, exp_rec in zip(csvr, cursor_rows):
assert csv_rec[0] == str(exp_rec[0])
assert csv_rec[1] == exp_rec[1]
@mock.patch(mock_module_path + ".OracleHook", autospec=True)
@mock.patch(mock_module_path + ".AzureDataLakeHook", autospec=True)
def test_execute(self, mock_data_lake_hook, mock_oracle_hook):
task_id = "some_test_id"
sql = "some_sql"
sql_params = {":p_data": "2018-01-01"}
oracle_conn_id = "oracle_conn_id"
filename = "some_filename"
azure_data_lake_conn_id = "azure_data_lake_conn_id"
azure_data_lake_path = "azure_data_lake_path"
delimiter = "|"
encoding = "latin-1"
cursor_description = [
("id", "<class 'oracledb.NUMBER'>", 39, None, 38, 0, 0),
("description", "<class 'oracledb.STRING'>", 60, 240, None, None, 1),
]
cursor_rows = [[1, "description 1"], [2, "description 2"]]
cursor_mock = MagicMock()
cursor_mock.description.return_value = cursor_description
cursor_mock.__iter__.return_value = cursor_rows
mock_oracle_conn = MagicMock()
mock_oracle_conn.cursor().return_value = cursor_mock
mock_oracle_hook.get_conn().return_value = mock_oracle_conn
op = OracleToAzureDataLakeOperator(
task_id=task_id,
filename=filename,
oracle_conn_id=oracle_conn_id,
sql=sql,
sql_params=sql_params,
azure_data_lake_conn_id=azure_data_lake_conn_id,
azure_data_lake_path=azure_data_lake_path,
delimiter=delimiter,
encoding=encoding,
)
op.execute(None)
mock_oracle_hook.assert_called_once_with(oracle_conn_id=oracle_conn_id)
mock_data_lake_hook.assert_called_once_with(azure_data_lake_conn_id=azure_data_lake_conn_id)
| TestOracleToAzureDataLakeTransfer |
python | PyCQA__pylint | tests/regrtest_data/dummy_plugin/dummy_plugin.py | {
"start": 422,
"end": 902
} | class ____(BaseChecker):
name = 'dummy_plugin'
msgs = {'I9060': ('Dummy short desc 02', 'dummy-message-02', 'Dummy long desc')}
options = (
('dummy_option_2', {
'type': 'string',
'metavar': '<string>',
'help': 'Dummy option 2',
'default': ''
}),
)
def register(linter: "PyLinter") -> None:
linter.register_checker(DummyPlugin1(linter))
linter.register_checker(DummyPlugin2(linter))
| DummyPlugin2 |
python | astropy__astropy | astropy/units/core.py | {
"start": 68024,
"end": 70115
} | class ____(IrreducibleUnit):
"""
A unit that did not parse correctly. This allows for
round-tripping it as a string, but no unit operations actually work
on it.
Parameters
----------
st : str
The name of the unit.
"""
# For UnrecognizedUnits, we want to use "standard" Python
# pickling, not the special case that is used for
# IrreducibleUnits.
__reduce__ = object.__reduce__
def __repr__(self) -> str:
return f"UnrecognizedUnit({self})"
def __bytes__(self) -> bytes:
return self.name.encode("ascii", "replace")
def __str__(self) -> str:
return self.name
def to_string(self, format=None):
return self.name
def _unrecognized_operator(self, *args, **kwargs):
raise ValueError(
f"The unit {self.name!r} is unrecognized, so all arithmetic operations "
"with it are invalid."
)
__pow__ = __truediv__ = __rtruediv__ = __mul__ = __rmul__ = _unrecognized_operator
__lt__ = __gt__ = __le__ = __ge__ = __neg__ = _unrecognized_operator
def __hash__(self):
# __hash__ isn't inherited in classes with a custom __eq__ method
return self._hash
def __eq__(self, other):
try:
other = Unit(other, parse_strict="silent")
except (ValueError, UnitsError, TypeError):
return NotImplemented
return isinstance(other, type(self)) and self.name == other.name
def __ne__(self, other):
return not (self == other)
def is_equivalent(self, other, equivalencies=None):
self._normalize_equivalencies(equivalencies)
return self == other
def get_converter(self, other, equivalencies=None):
self._normalize_equivalencies(equivalencies)
raise ValueError(
f"The unit {self.name!r} is unrecognized. It can not be converted "
"to other units."
)
def _get_format_name(self, format: str) -> str:
return self.name
def is_unity(self) -> Literal[False]:
return False
| UnrecognizedUnit |
python | django-extensions__django-extensions | django_extensions/management/commands/clear_cache.py | {
"start": 337,
"end": 1490
} | class ____(BaseCommand):
"""A simple management command which clears the site-wide cache."""
help = "Fully clear site-wide cache."
def add_arguments(self, parser):
parser.add_argument("--cache", action="append", help="Name of cache to clear")
parser.add_argument(
"--all",
"-a",
action="store_true",
default=False,
dest="all_caches",
help="Clear all configured caches",
)
@signalcommand
def handle(self, cache, all_caches, *args, **kwargs):
if not cache and not all_caches:
cache = [DEFAULT_CACHE_ALIAS]
elif cache and all_caches:
raise CommandError("Using both --all and --cache is not supported")
elif all_caches:
cache = getattr(settings, "CACHES", {DEFAULT_CACHE_ALIAS: {}}).keys()
for key in cache:
try:
caches[key].clear()
except InvalidCacheBackendError:
self.stderr.write('Cache "%s" is invalid!\n' % key)
else:
self.stdout.write('Cache "%s" has been cleared!\n' % key)
| Command |
python | pytorch__pytorch | torch/fx/experimental/migrate_gradual_types/constraint.py | {
"start": 282,
"end": 710
} | class ____(Constraint):
def __init__(self, conjuncts):
"""
:param conjuncts: Conjunction of constraints
"""
self.conjucts = conjuncts
def __eq__(self, other):
if isinstance(other, Conj):
return self.conjucts == other.conjucts and self.conjucts == other.conjucts
else:
return False
def __repr__(self):
return f"And({self.conjucts})"
| Conj |
python | Textualize__textual | src/textual/drivers/win32.py | {
"start": 1329,
"end": 1492
} | class ____(Structure):
"""https://docs.microsoft.com/en-us/windows/console/coord-str"""
_fields_ = [
("X", SHORT),
("Y", SHORT),
]
| COORD |
python | getsentry__sentry | src/sentry/sentry_apps/api/parsers/sentry_app.py | {
"start": 1229,
"end": 1675
} | class ____(serializers.Field):
def to_internal_value(self, data):
if data is None:
return
if not set(data).issubset(VALID_EVENT_RESOURCES):
raise ValidationError(
"Invalid event subscription: {}".format(
", ".join(set(data).difference(VALID_EVENT_RESOURCES))
)
)
return data
@extend_schema_field(OpenApiTypes.OBJECT)
| EventListField |
python | kamyu104__LeetCode-Solutions | Python/minimum-absolute-difference-in-bst.py | {
"start": 29,
"end": 539
} | class ____(object):
def getMinimumDifference(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def inorderTraversal(root, prev, result):
if not root:
return (result, prev)
result, prev = inorderTraversal(root.left, prev, result)
if prev: result = min(result, root.val - prev.val)
return inorderTraversal(root.right, root, result)
return inorderTraversal(root, None, float("inf"))[0]
| Solution |
python | jd__tenacity | tenacity/asyncio/retry.py | {
"start": 2611,
"end": 3220
} | class ____(async_retry_base):
"""Retries if the result verifies a predicate."""
def __init__(
self, predicate: typing.Callable[[typing.Any], typing.Awaitable[bool]]
) -> None:
self.predicate = predicate
async def __call__(self, retry_state: "RetryCallState") -> bool: # type: ignore[override]
if retry_state.outcome is None:
raise RuntimeError("__call__() called before outcome was set")
if not retry_state.outcome.failed:
return await self.predicate(retry_state.outcome.result())
else:
return False
| retry_if_result |
python | tensorflow__tensorflow | tensorflow/python/ops/nn_test.py | {
"start": 8665,
"end": 9643
} | class ____(test_lib.TestCase):
def testL2Loss(self):
for dtype in [dtypes.float32, dtypes.float64] + \
[dtypes.bfloat16] if test_util.is_gpu_available(
cuda_only=True) else []:
x = constant_op.constant([1.0, 0.0, 3.0, 2.0],
shape=[2, 2],
name="x",
dtype=dtype)
l2loss = nn_ops.l2_loss(x)
value = self.evaluate(l2loss)
self.assertAllClose(7.0, value)
def testGradient(self):
x_shape = [20, 7, 3]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
with self.cached_session():
x = constant_op.constant(x_val, name="x")
theoretical, numerical = gradient_checker_v2.compute_gradient(
nn_ops.l2_loss, [x])
self.assertAllClose(theoretical, numerical)
@test_util.run_all_in_graph_and_eager_modes
| L2LossTest |
python | huggingface__transformers | src/transformers/models/longt5/modeling_longt5.py | {
"start": 14157,
"end": 24506
} | class ____(nn.Module):
def __init__(
self,
config: LongT5Config,
has_relative_attention_bias=False,
layer_idx: Optional[int] = None,
):
super().__init__()
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.relative_attention_max_distance = config.relative_attention_max_distance
self.d_model = config.d_model
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_heads
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.key_value_proj_dim
self.layer_idx = layer_idx
if layer_idx is None and self.is_decoder:
logger.warning_once(
f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
"will to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
self.gradient_checkpointing = False
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
# now relative_position is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
relative_position_if_large = max_exact + (
torch.log(relative_position.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
relative_position_if_large = torch.min(
relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
)
relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
return relative_buckets
def compute_bias(self, query_length, key_length, device=None, cache_position=None):
"""Compute binned relative position bias"""
if device is None:
device = self.relative_attention_bias.weight.device
if cache_position is None:
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
else:
context_position = cache_position[:, None].to(device)
memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
relative_position = memory_position - context_position # shape (query_length, key_length)
relative_position_bucket = self._relative_position_bucket(
relative_position, # shape (query_length, key_length)
bidirectional=(not self.is_decoder),
num_buckets=self.relative_attention_num_buckets,
max_distance=self.relative_attention_max_distance,
)
values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
return values
def forward(
self,
hidden_states,
mask=None,
key_value_states=None,
position_bias=None,
past_key_values=None,
query_length=None,
use_cache=False,
output_attentions=False,
cache_position=None,
):
"""
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
"""
# Input is (batch_size, seq_length, dim)
# Mask is (batch_size, 1, 1, key_length) (non-causal encoder) or (batch_size, 1, seq_length, key_length) (causal decoder)
batch_size, seq_length = hidden_states.shape[:2]
# if key_value_states are provided this layer is used as a cross-attention layer for the decoder
is_cross_attention = key_value_states is not None
query_states = self.q(hidden_states)
query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
# Check is encoder-decoder model is being used. Otherwise we'll get `DynamicCache`
is_updated = False
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_states from cache
curr_past_key_values = past_key_values.cross_attention_cache
else:
curr_past_key_values = past_key_values.self_attention_cache
else:
curr_past_key_values = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_states = curr_past_key_values.layers[self.layer_idx].keys
value_states = curr_past_key_values.layers[self.layer_idx].values
else:
key_states = self.k(current_states)
value_states = self.v(current_states)
key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
if past_key_values is not None:
# save all key/value_states to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_values.update(
key_states, value_states, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
# compute scores, equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
scores = torch.matmul(query_states, key_states.transpose(3, 2))
if position_bias is None:
key_length = key_states.shape[-2]
# cache position is 0-indexed so we add 1 to get the real length of queries (aka with past)
real_seq_length = query_length if query_length is not None else cache_position[-1] + 1
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype
)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
position_bias = self.compute_bias(
real_seq_length, key_length, device=scores.device, cache_position=cache_position
)
position_bias = position_bias[:, :, -seq_length:, :]
if mask is not None:
causal_mask = mask[:, :, :, : key_states.shape[-2]]
position_bias = position_bias + causal_mask
position_bias_masked = position_bias
scores += position_bias_masked
# (batch_size, n_heads, seq_length, key_length)
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(batch_size, -1, self.inner_dim)
attn_output = self.o(attn_output)
outputs = (attn_output, position_bias)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
| LongT5Attention |
python | pandas-dev__pandas | pandas/tests/scalar/timedelta/test_arithmetic.py | {
"start": 11424,
"end": 31582
} | class ____:
"""
Tests for Timedelta methods:
__mul__, __rmul__,
__div__, __rdiv__,
__truediv__, __rtruediv__,
__floordiv__, __rfloordiv__,
__mod__, __rmod__,
__divmod__, __rdivmod__
"""
# ---------------------------------------------------------------
# Timedelta.__mul__, __rmul__
@pytest.mark.parametrize(
"td_nat", [NaT, np.timedelta64("NaT", "ns"), np.timedelta64("NaT")]
)
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nat(self, op, td_nat):
# GH#19819
td = Timedelta(10, unit="D")
typs = "|".join(["numpy.timedelta64", "NaTType", "Timedelta"])
msg = "|".join(
[
rf"unsupported operand type\(s\) for \*: '{typs}' and '{typs}'",
r"ufunc '?multiply'? cannot use operands with types",
]
)
with pytest.raises(TypeError, match=msg):
op(td, td_nat)
@pytest.mark.parametrize("nan", [np.nan, np.float64("NaN"), float("nan")])
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_nan(self, op, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="D")
result = op(td, nan)
assert result is NaT
@pytest.mark.parametrize("op", [operator.mul, ops.rmul])
def test_td_mul_scalar(self, op):
# GH#19738
td = Timedelta(minutes=3)
result = op(td, 2)
assert result == Timedelta(minutes=6)
result = op(td, 1.5)
assert result == Timedelta(minutes=4, seconds=30)
assert op(td, np.nan) is NaT
assert op(-1, td)._value == -1 * td._value
assert op(-1.0, td)._value == -1.0 * td._value
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
# timedelta * datetime is gibberish
op(td, Timestamp(2016, 1, 2))
with pytest.raises(TypeError, match=msg):
# invalid multiply with another timedelta
op(td, td)
def test_td_mul_numeric_ndarray(self):
td = Timedelta("1 day")
other = np.array([2])
expected = np.array([Timedelta("2 Days").to_timedelta64()])
result = td * other
tm.assert_numpy_array_equal(result, expected)
result = other * td
tm.assert_numpy_array_equal(result, expected)
def test_td_mul_numeric_ndarray_0d(self):
td = Timedelta("1 day")
other = np.array(2, dtype=np.int64)
assert other.ndim == 0
expected = Timedelta("2 days")
res = td * other
assert type(res) is Timedelta
assert res == expected
res = other * td
assert type(res) is Timedelta
assert res == expected
def test_td_mul_td64_ndarray_invalid(self):
td = Timedelta("1 day")
other = np.array([Timedelta("2 Days").to_timedelta64()])
msg = (
"ufunc '?multiply'? cannot use operands with types "
rf"dtype\('{tm.ENDIAN}m8\[ns\]'\) and dtype\('{tm.ENDIAN}m8\[ns\]'\)"
)
with pytest.raises(TypeError, match=msg):
td * other
with pytest.raises(TypeError, match=msg):
other * td
# ---------------------------------------------------------------
# Timedelta.__div__, __truediv__
def test_td_div_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="D")
result = td / offsets.Hour(1)
assert result == 240
assert td / td == 1
assert td / np.timedelta64(60, "h") == 4
assert np.isnan(td / NaT)
def test_td_div_td64_non_nano(self):
# truediv
td = Timedelta("1 days 2 hours 3 ns")
result = td / np.timedelta64(1, "D")
assert result == td._value / (86400 * 10**9)
result = td / np.timedelta64(1, "s")
assert result == td._value / 10**9
result = td / np.timedelta64(1, "ns")
assert result == td._value
# floordiv
td = Timedelta("1 days 2 hours 3 ns")
result = td // np.timedelta64(1, "D")
assert result == 1
result = td // np.timedelta64(1, "s")
assert result == 93600
result = td // np.timedelta64(1, "ns")
assert result == td._value
def test_td_div_numeric_scalar(self):
# GH#19738
td = Timedelta(10, unit="D")
result = td / 2
assert isinstance(result, Timedelta)
assert result == Timedelta(days=5)
result = td / 5
assert isinstance(result, Timedelta)
assert result == Timedelta(days=2)
@pytest.mark.parametrize(
"nan",
[
np.nan,
np.float64("NaN"),
float("nan"),
],
)
def test_td_div_nan(self, nan):
# np.float64('NaN') has a 'dtype' attr, avoid treating as array
td = Timedelta(10, unit="D")
result = td / nan
assert result is NaT
result = td // nan
assert result is NaT
def test_td_div_td64_ndarray(self):
td = Timedelta("1 day")
other = np.array([Timedelta("2 Days").to_timedelta64()])
expected = np.array([0.5])
result = td / other
tm.assert_numpy_array_equal(result, expected)
result = other / td
tm.assert_numpy_array_equal(result, expected * 4)
def test_td_div_ndarray_0d(self):
td = Timedelta("1 day")
other = np.array(1)
res = td / other
assert isinstance(res, Timedelta)
assert res == td
# ---------------------------------------------------------------
# Timedelta.__rdiv__
def test_td_rdiv_timedeltalike_scalar(self):
# GH#19738
td = Timedelta(10, unit="D")
result = offsets.Hour(1) / td
assert result == 1 / 240.0
assert np.timedelta64(60, "h") / td == 0.25
def test_td_rdiv_na_scalar(self):
# GH#31869 None gets cast to NaT
td = Timedelta(10, unit="D")
result = NaT / td
assert np.isnan(result)
result = None / td
assert np.isnan(result)
result = np.timedelta64("NaT") / td
assert np.isnan(result)
msg = r"unsupported operand type\(s\) for /: 'numpy.datetime64' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.datetime64("NaT") / td
msg = r"unsupported operand type\(s\) for /: 'float' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
np.nan / td
def test_td_rdiv_ndarray(self):
td = Timedelta(10, unit="D")
arr = np.array([td], dtype=object)
result = arr / td
expected = np.array([1], dtype=np.float64)
tm.assert_numpy_array_equal(result, expected)
arr = np.array([None])
result = arr / td
expected = np.array([np.nan])
tm.assert_numpy_array_equal(result, expected)
arr = np.array([np.nan], dtype=object)
msg = r"unsupported operand type\(s\) for /: 'float' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
arr / td
arr = np.array([np.nan], dtype=np.float64)
msg = "cannot use operands with types dtype"
with pytest.raises(TypeError, match=msg):
arr / td
def test_td_rdiv_ndarray_0d(self):
td = Timedelta(10, unit="D")
arr = np.array(td.asm8)
assert arr / td == 1
# ---------------------------------------------------------------
# Timedelta.__floordiv__
def test_td_floordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
assert td // scalar == 1
assert -td // scalar.to_pytimedelta() == -2
assert (2 * td) // scalar.to_timedelta64() == 2
def test_td_floordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
assert td // np.nan is NaT
assert np.isnan(td // NaT)
assert np.isnan(td // np.timedelta64("NaT"))
def test_td_floordiv_offsets(self):
# GH#19738
td = Timedelta(hours=3, minutes=4)
assert td // offsets.Hour(1) == 3
assert td // offsets.Minute(2) == 92
def test_td_floordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
msg = "|".join(
[
r"Invalid dtype datetime64\[D\] for __floordiv__",
"'dtype' is an invalid keyword argument for this function",
"this function got an unexpected keyword argument 'dtype'",
r"ufunc '?floor_divide'? cannot use operands with types",
]
)
with pytest.raises(TypeError, match=msg):
td // np.datetime64("2016-01-01", dtype="datetime64[us]")
def test_td_floordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
expected = Timedelta(hours=1, minutes=32)
assert td // 2 == expected
assert td // 2.0 == expected
assert td // np.float64(2.0) == expected
assert td // np.int32(2.0) == expected
assert td // np.uint8(2.0) == expected
def test_td_floordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
scalar = Timedelta(hours=3, minutes=3)
# Array-like others
assert td // np.array(scalar.to_timedelta64()) == 1
res = (3 * td) // np.array([scalar.to_timedelta64()])
expected = np.array([3], dtype=np.int64)
tm.assert_numpy_array_equal(res, expected)
res = (10 * td) // np.array([scalar.to_timedelta64(), np.timedelta64("NaT")])
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_floordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=4)
ser = pd.Series([1], dtype=np.int64)
res = td // ser
assert res.dtype.kind == "m"
# ---------------------------------------------------------------
# Timedelta.__rfloordiv__
def test_td_rfloordiv_timedeltalike_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = Timedelta(hours=3, minutes=4)
# scalar others
# x // Timedelta is defined only for timedelta-like x. int-like,
# float-like, and date-like, in particular, should all either
# a) raise TypeError directly or
# b) return NotImplemented, following which the reversed
# operation will raise TypeError.
assert td.__rfloordiv__(scalar) == 1
assert (-td).__rfloordiv__(scalar.to_pytimedelta()) == -2
assert (2 * td).__rfloordiv__(scalar.to_timedelta64()) == 0
def test_td_rfloordiv_null_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
assert np.isnan(td.__rfloordiv__(NaT))
assert np.isnan(td.__rfloordiv__(np.timedelta64("NaT")))
def test_td_rfloordiv_offsets(self):
# GH#19738
assert offsets.Hour(1) // Timedelta(minutes=25) == 2
def test_td_rfloordiv_invalid_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
dt64 = np.datetime64("2016-01-01", "us")
assert td.__rfloordiv__(dt64) is NotImplemented
msg = (
r"unsupported operand type\(s\) for //: 'numpy.datetime64' and 'Timedelta'"
)
with pytest.raises(TypeError, match=msg):
dt64 // td
def test_td_rfloordiv_numeric_scalar(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
assert td.__rfloordiv__(np.nan) is NotImplemented
assert td.__rfloordiv__(3.5) is NotImplemented
assert td.__rfloordiv__(2) is NotImplemented
assert td.__rfloordiv__(np.float64(2.0)) is NotImplemented
assert td.__rfloordiv__(np.uint8(9)) is NotImplemented
assert td.__rfloordiv__(np.int32(2.0)) is NotImplemented
msg = r"unsupported operand type\(s\) for //: '.*' and 'Timedelta"
with pytest.raises(TypeError, match=msg):
np.float64(2.0) // td
with pytest.raises(TypeError, match=msg):
np.uint8(9) // td
with pytest.raises(TypeError, match=msg):
# deprecated GH#19761, enforced GH#29797
np.int32(2.0) // td
def test_td_rfloordiv_timedeltalike_array(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
scalar = Timedelta(hours=3, minutes=4)
# Array-like others
assert td.__rfloordiv__(np.array(scalar.to_timedelta64())) == 1
res = td.__rfloordiv__(np.array([(3 * scalar).to_timedelta64()]))
expected = np.array([3], dtype=np.int64)
tm.assert_numpy_array_equal(res, expected)
arr = np.array([(10 * scalar).to_timedelta64(), np.timedelta64("NaT")])
res = td.__rfloordiv__(arr)
expected = np.array([10, np.nan])
tm.assert_numpy_array_equal(res, expected)
def test_td_rfloordiv_intarray(self):
# deprecated GH#19761, enforced GH#29797
ints = np.array([1349654400, 1349740800, 1349827200, 1349913600]) * 10**9
msg = "Invalid dtype"
with pytest.raises(TypeError, match=msg):
ints // Timedelta(1, unit="s")
def test_td_rfloordiv_numeric_series(self):
# GH#18846
td = Timedelta(hours=3, minutes=3)
ser = pd.Series([1], dtype=np.int64)
res = td.__rfloordiv__(ser)
assert res is NotImplemented
msg = "Invalid dtype"
with pytest.raises(TypeError, match=msg):
# Deprecated GH#19761, enforced GH#29797
ser // td
# ----------------------------------------------------------------
# Timedelta.__mod__, __rmod__
def test_mod_timedeltalike(self):
# GH#19365
td = Timedelta(hours=37)
# Timedelta-like others
result = td % Timedelta(hours=6)
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=1)
result = td % timedelta(minutes=60)
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
result = td % NaT
assert result is NaT
def test_mod_timedelta64_nat(self):
# GH#19365
td = Timedelta(hours=37)
result = td % np.timedelta64("NaT", "ns")
assert result is NaT
def test_mod_timedelta64(self):
# GH#19365
td = Timedelta(hours=37)
result = td % np.timedelta64(2, "h")
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=1)
def test_mod_offset(self):
# GH#19365
td = Timedelta(hours=37)
result = td % offsets.Hour(5)
assert isinstance(result, Timedelta)
assert result == Timedelta(hours=2)
def test_mod_numeric(self):
# GH#19365
td = Timedelta(hours=37)
# Numeric Others
result = td % 2
assert isinstance(result, Timedelta)
assert result == Timedelta(0)
result = td % 1e12
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=3, seconds=20)
result = td % int(1e12)
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=3, seconds=20)
def test_mod_invalid(self):
# GH#19365
td = Timedelta(hours=37)
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
td % Timestamp("2018-01-22")
with pytest.raises(TypeError, match=msg):
td % []
def test_rmod_pytimedelta(self):
# GH#19365
td = Timedelta(minutes=3)
result = timedelta(minutes=4) % td
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=1)
def test_rmod_timedelta64(self):
# GH#19365
td = Timedelta(minutes=3)
result = np.timedelta64(5, "m") % td
assert isinstance(result, Timedelta)
assert result == Timedelta(minutes=2)
def test_rmod_invalid(self):
# GH#19365
td = Timedelta(minutes=3)
msg = "unsupported operand"
with pytest.raises(TypeError, match=msg):
Timestamp("2018-01-22") % td
with pytest.raises(TypeError, match=msg):
15 % td
with pytest.raises(TypeError, match=msg):
16.0 % td
msg = "Invalid dtype int"
with pytest.raises(TypeError, match=msg):
np.array([22, 24]) % td
# ----------------------------------------------------------------
# Timedelta.__divmod__, __rdivmod__
def test_divmod_numeric(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, 53 * 3600 * 1e9)
assert result[0] == Timedelta(1, unit="ns")
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=1)
assert result
result = divmod(td, np.nan)
assert result[0] is NaT
assert result[1] is NaT
def test_divmod(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, timedelta(days=1))
assert result[0] == 2
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=6)
result = divmod(td, 54)
assert result[0] == Timedelta(hours=1)
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(0)
result = divmod(td, NaT)
assert np.isnan(result[0])
assert result[1] is NaT
def test_divmod_offset(self):
# GH#19365
td = Timedelta(days=2, hours=6)
result = divmod(td, offsets.Hour(-4))
assert result[0] == -14
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=-2)
def test_divmod_invalid(self):
# GH#19365
td = Timedelta(days=2, hours=6)
msg = r"unsupported operand type\(s\) for //: 'Timedelta' and 'Timestamp'"
with pytest.raises(TypeError, match=msg):
divmod(td, Timestamp("2018-01-22"))
def test_rdivmod_pytimedelta(self):
# GH#19365
result = divmod(timedelta(days=2, hours=6), Timedelta(days=1))
assert result[0] == 2
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=6)
def test_rdivmod_offset(self):
result = divmod(offsets.Hour(54), Timedelta(hours=-4))
assert result[0] == -14
assert isinstance(result[1], Timedelta)
assert result[1] == Timedelta(hours=-2)
def test_rdivmod_invalid(self):
# GH#19365
td = Timedelta(minutes=3)
msg = "unsupported operand type"
with pytest.raises(TypeError, match=msg):
divmod(Timestamp("2018-01-22"), td)
with pytest.raises(TypeError, match=msg):
divmod(15, td)
with pytest.raises(TypeError, match=msg):
divmod(16.0, td)
msg = "Invalid dtype int"
with pytest.raises(TypeError, match=msg):
divmod(np.array([22, 24]), td)
# ----------------------------------------------------------------
@pytest.mark.parametrize(
"op", [operator.mul, ops.rmul, operator.truediv, ops.rdiv, ops.rsub]
)
@pytest.mark.parametrize(
"arr",
[
[Timestamp("20130101 9:01"), Timestamp("20121230 9:02")],
[Timestamp("2021-11-09 09:54:00"), Timedelta("1D")],
],
)
def test_td_op_timedelta_timedeltalike_array(self, op, arr):
arr = np.array(arr)
msg = "unsupported operand type|cannot use operands with types"
with pytest.raises(TypeError, match=msg):
op(arr, Timedelta("1D"))
def test_mul_bool_invalid(self):
# GH#62316
msg = "Cannot multiply Timedelta by bool. Explicitly cast to integer"
with pytest.raises(TypeError, match=msg):
Timedelta("1 day") * True
| TestTimedeltaMultiplicationDivision |
python | django__django | tests/admin_views/models.py | {
"start": 435,
"end": 836
} | class ____(models.Model):
"""
A simple section that links to articles, to test linking to related items
in admin views.
"""
name = models.CharField(max_length=100)
def __str__(self):
return self.name
@property
def name_property(self):
"""
A property that simply returns the name. Used to test #24461
"""
return self.name
| Section |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 143959,
"end": 144254
} | class ____(BaseModel, extra="forbid"):
points: List["PointVectors"] = Field(..., description="Points with named vectors")
shard_key: Optional["ShardKeySelector"] = Field(default=None, description="")
update_filter: Optional["Filter"] = Field(default=None, description="")
| UpdateVectors |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_select.py | {
"start": 51518,
"end": 52910
} | class ____(fixtures.TablesTest):
__sparse_driver_backend__ = True
__requires__ = ("computed_columns",)
@classmethod
def define_tables(cls, metadata):
Table(
"square",
metadata,
Column("id", Integer, primary_key=True),
Column("side", Integer),
Column("area", Integer, Computed("side * side")),
Column("perimeter", Integer, Computed("4 * side")),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.square.insert(),
[{"id": 1, "side": 10}, {"id": 10, "side": 42}],
)
def test_select_all(self):
with config.db.connect() as conn:
res = conn.execute(
select(text("*"))
.select_from(self.tables.square)
.order_by(self.tables.square.c.id)
).fetchall()
eq_(res, [(1, 10, 100, 40), (10, 42, 1764, 168)])
def test_select_columns(self):
with config.db.connect() as conn:
res = conn.execute(
select(
self.tables.square.c.area, self.tables.square.c.perimeter
)
.select_from(self.tables.square)
.order_by(self.tables.square.c.id)
).fetchall()
eq_(res, [(100, 40), (1764, 168)])
| ComputedColumnTest |
python | getsentry__sentry | src/sentry/integrations/pagerduty/integration.py | {
"start": 3027,
"end": 7224
} | class ____(IntegrationInstallation):
def get_keyring_client(self, keyid: int | str) -> PagerDutyClient:
org_integration = self.org_integration
assert org_integration, "Cannot get client without an organization integration"
integration_key = None
for pds in org_integration.config.get("pagerduty_services", []):
if str(pds["id"]) == str(keyid):
integration_key = pds["integration_key"]
if not integration_key:
raise ValueError("Cannot get client without an an integration_key.")
return PagerDutyClient(
integration_id=org_integration.integration_id, integration_key=integration_key
)
def get_client(self) -> None:
raise NotImplementedError("Use get_keyring_client instead.")
def get_organization_config(self) -> list[PagerDutyOrganizationConfig]:
return [
{
"name": "service_table",
"type": "table",
"label": "PagerDuty services with the Sentry integration enabled",
"help": "If services need to be updated, deleted, or added manually please do so here. Alert rules will need to be individually updated for any additions or deletions of services.",
"addButtonText": "",
"columnLabels": {"service": "Service", "integration_key": "Integration Key"},
"columnKeys": ["service", "integration_key"],
"confirmDeleteMessage": "Any alert rules associated with this service will stop working. The rules will still exist but will show a `removed` service.",
}
]
def update_organization_config(self, data: MutableMapping[str, Any]) -> None:
if "service_table" in data:
service_rows = data["service_table"]
# validate fields
bad_rows = list(
filter(lambda x: not x["service"] or not x["integration_key"], service_rows)
)
if bad_rows:
raise IntegrationError("Name and key are required")
oi = OrganizationIntegration.objects.get(id=self.org_integration.id)
existing_service_items: list[PagerDutyServiceDict] = oi.config.get(
"pagerduty_services", []
)
updated_items: list[PagerDutyServiceDict] = []
for service_item in existing_service_items:
# find the matching row from the input
matched_rows = list(filter(lambda x: x["id"] == service_item["id"], service_rows))
if matched_rows:
matched_row = matched_rows[0]
updated_items.append(
{
"id": matched_row["id"],
"integration_key": matched_row["integration_key"],
"service_name": matched_row["service"],
"integration_id": service_item["integration_id"],
}
)
with transaction.atomic(router.db_for_write(OrganizationIntegration)):
oi.config["pagerduty_services"] = updated_items
oi.save()
# new rows don't have an id
new_rows = list(filter(lambda x: not x["id"], service_rows))
for row in new_rows:
service_name = row["service"]
key = row["integration_key"]
add_service(oi, integration_key=key, service_name=service_name)
def get_config_data(self) -> Mapping[str, list[PagerDutyServiceConfig]]:
service_list = []
for s in self.services:
service_list.append(
PagerDutyServiceConfig(
service=s["service_name"],
integration_key=s["integration_key"],
id=s["id"],
)
)
return {"service_table": service_list}
@property
def services(self) -> list[PagerDutyServiceDict]:
if self.org_integration:
return self.org_integration.config.get("pagerduty_services", [])
return []
| PagerDutyIntegration |
python | simplejson__simplejson | simplejson/tests/test_dump.py | {
"start": 227,
"end": 616
} | class ____(binary_type):
def decode(self, encoding=None):
return "bad decode"
def __str__(self):
return "bad __str__"
def __bytes__(self):
return b("bad __bytes__")
def as_text_type(s):
if PY3 and isinstance(s, bytes):
return s.decode('ascii')
return s
def decode_iso_8859_15(b):
return b.decode('iso-8859-15')
| MisbehavingBytesSubtype |
python | huggingface__transformers | tests/models/qwen3_moe/test_modeling_qwen3_moe.py | {
"start": 4220,
"end": 9376
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.model = None
@classmethod
def tearDownClass(cls):
del cls.model
cleanup(torch_device, gc_collect=True)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
@classmethod
def get_model(cls):
if cls.model is None:
cls.model = Qwen3MoeForCausalLM.from_pretrained(
"Qwen/Qwen3-30B-A3B-Base", device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True)
)
return cls.model
@slow
def test_model_15b_a2b_logits(self):
input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
model = self.get_model()
input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
with torch.no_grad():
out = model(input_ids).logits.float().cpu()
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[0.3244, 0.4406, 9.0972, 7.3597, 4.9985, 8.0314, 8.2148, 9.2134]])
torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
# slicing logits[0, 0, 0:30]
EXPECTED_SLICE = torch.tensor([6.8984, 4.8633, 4.7734, 4.5898, 2.5664, 2.9902, 4.8828, 5.9414, 4.6250, 3.0840, 5.1602, 6.0117, 4.9453, 5.3008, 3.3145, 11.3906, 12.8359, 12.4844, 11.2891, 11.0547, 11.0391, 10.3359, 10.3438, 10.2578, 10.7969, 5.9688, 3.7676, 5.5938, 5.3633, 5.8203]) # fmt: skip
torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-4, atol=1e-4)
@slow
def test_model_15b_a2b_generation(self):
EXPECTED_TEXT_COMPLETION = "To be or not to be: the role of the cell cycle in the regulation of apoptosis.\nThe cell cycle is a highly"
prompt = "To be or not to"
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-30B-A3B-Base", use_fast=False)
model = self.get_model()
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
# greedy generation outputs
generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
@require_bitsandbytes
@slow
@require_flash_attn
@pytest.mark.flash_attn_test
def test_model_15b_a2b_long_prompt(self):
EXPECTED_OUTPUT_TOKEN_IDS = [306, 338]
# An input with 4097 tokens that is above the size of the sliding window
input_ids = [1] + [306, 338] * 2048
model = Qwen3MoeForCausalLM.from_pretrained(
"Qwen/Qwen3-30B-A3B-Base",
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
attn_implementation="flash_attention_2",
)
input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())
@slow
def test_model_15b_a2b_long_prompt_sdpa(self):
EXPECTED_OUTPUT_TOKEN_IDS = [306, 338]
# An input with 4097 tokens that is above the size of the sliding window
input_ids = [1] + [306, 338] * 2048
model = self.get_model()
input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())
EXPECTED_TEXT_COMPLETION = "To be or not to be: the role of the cell cycle in the regulation of apoptosis.\nThe cell cycle is a highly"
prompt = "To be or not to"
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-30B-A3B-Base", use_fast=False)
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
# greedy generation outputs
generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
@slow
def test_speculative_generation(self):
EXPECTED_TEXT_COMPLETION = (
"To be or not to be: a question of life and death\n\nThe question of life and death is a question that has"
)
prompt = "To be or not to"
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-30B-A3B-Base", use_fast=False)
model = self.get_model()
assistant_model = model
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
# greedy generation outputs
set_seed(0)
generated_ids = model.generate(
input_ids, max_new_tokens=20, do_sample=True, temperature=0.3, assistant_model=assistant_model
)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| Qwen3MoeIntegrationTest |
python | pytorch__pytorch | torch/_dynamo/exc.py | {
"start": 7597,
"end": 7956
} | class ____(TorchDynamoException):
def __init__(self, msg: str, real_stack: Optional[StackSummary] = None) -> None:
super().__init__(msg)
self.msg = msg
self.real_stack = (
real_stack
if real_stack is not None
else torch._guards.TracingContext.extract_stack()
)
| UncapturedHigherOrderOpError |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_flow_runs.py | {
"start": 21356,
"end": 44932
} | class ____:
@pytest.fixture
async def flow_runs(self, flow, work_queue_1, session):
flow_2 = await models.flows.create_flow(
session=session,
flow=schemas.actions.FlowCreate(name="another-test"),
)
flow_run_1 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.actions.FlowRunCreate(
flow_id=flow.id, name="fr1", tags=["red"]
),
)
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.actions.FlowRunCreate(
flow_id=flow.id, name="fr2", tags=["blue"]
),
)
flow_run_3 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow_2.id,
name="fr3",
tags=["blue", "red"],
work_queue_id=work_queue_1.id,
),
)
await session.commit()
return [flow_run_1, flow_run_2, flow_run_3]
@pytest.fixture
async def flow_runs_with_idempotency_key(
self, flow, work_queue_1, session
) -> List[core.FlowRun]:
"""
Return a list of two `core.FlowRun`'s with different idempotency keys.
"""
flow_run_1_with_idempotency_key = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.actions.FlowRunCreate(
flow_id=flow.id,
name="fr1",
tags=["red"],
idempotency_key="my-idempotency-key",
),
)
flow_run_2_with_a_different_idempotency_key = (
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.actions.FlowRunCreate(
flow_id=flow.id,
name="fr2",
tags=["blue"],
idempotency_key="a-different-idempotency-key",
),
)
)
await session.commit()
return [
flow_run_1_with_idempotency_key,
flow_run_2_with_a_different_idempotency_key,
]
async def test_read_flow_runs(self, flow_runs, client):
response = await client.post("/flow_runs/filter")
assert response.status_code == status.HTTP_200_OK, response.text
assert len(response.json()) == 3
# return type should be correct
assert parse_obj_as(List[schemas.responses.FlowRunResponse], response.json())
async def test_read_flow_runs_work_pool_fields(
self,
flow_runs,
client,
work_pool,
work_queue_1,
):
response = await client.post("/flow_runs/filter")
assert response.status_code == status.HTTP_200_OK, response.text
assert len(response.json()) == 3
response = sorted(
parse_obj_as(List[schemas.responses.FlowRunResponse], response.json()),
key=lambda fr: fr.name,
)
assert response[2].work_pool_name == work_pool.name
assert response[2].work_queue_name == work_queue_1.name
async def test_read_flow_runs_applies_flow_filter(self, flow, flow_runs, client):
flow_run_filter = dict(
flows=schemas.filters.FlowFilter(
id=schemas.filters.FlowFilterId(any_=[flow.id])
).model_dump(mode="json")
)
response = await client.post("/flow_runs/filter", json=flow_run_filter)
assert response.status_code == status.HTTP_200_OK, response.text
assert len(response.json()) == 2
async def test_read_flow_runs_applies_flow_run_filter(
self, flow, flow_runs, client
):
flow_run_filter = dict(
flow_runs=schemas.filters.FlowRunFilter(
id=schemas.filters.FlowRunFilterId(any_=[flow_runs[0].id])
).model_dump(mode="json")
)
response = await client.post("/flow_runs/filter", json=flow_run_filter)
assert response.status_code == status.HTTP_200_OK, response.text
assert len(response.json()) == 1
assert response.json()[0]["id"] == str(flow_runs[0].id)
async def test_read_flow_runs_applies_flow_run_idempotency_key_filter(
self, flow_runs_with_idempotency_key, client
):
"""
This test tests that when we pass a value for idempotency key to the flow run
filter, we get back the flow run with the matching idempotency key.
"""
idempotency_key_of_flow_run_we_want_to_retrieve = (
flow_runs_with_idempotency_key[0].idempotency_key
)
flow_run_idempotency_key_filter = dict(
flow_runs=schemas.filters.FlowRunFilter(
idempotency_key=schemas.filters.FlowRunFilterIdempotencyKey(
any_=[idempotency_key_of_flow_run_we_want_to_retrieve]
)
).model_dump(mode="json")
)
response = await client.post(
"/flow_runs/filter", json=flow_run_idempotency_key_filter
)
flow_run_filter_results = response.json()
assert response.status_code == status.HTTP_200_OK, response.text
assert (
len(flow_run_filter_results) == 1
and len(flow_runs_with_idempotency_key) == 2
)
assert flow_run_filter_results[0]["idempotency_key"] == str(
idempotency_key_of_flow_run_we_want_to_retrieve
)
async def test_read_flow_runs_idempotency_key_filter_excludes_idempotency_key(
self, flow_runs_with_idempotency_key, client
):
"""
This test tests to make sure that when you pass idempotency keys to the not_any_ argument
of the filter, the filter excludes flow runs having that value for idempotency key
"""
idempotency_key_of_flow_run_to_exclude: str = flow_runs_with_idempotency_key[
0
].idempotency_key
idempotency_key_of_flow_run_that_should_be_included: str = (
flow_runs_with_idempotency_key[1].idempotency_key
)
flow_run_idempotency_key_exclude_filter = dict(
flow_runs=schemas.filters.FlowRunFilter(
idempotency_key=schemas.filters.FlowRunFilterIdempotencyKey(
not_any_=[idempotency_key_of_flow_run_to_exclude]
)
).model_dump(mode="json")
)
response = await client.post(
"/flow_runs/filter", json=flow_run_idempotency_key_exclude_filter
)
flow_run_filter_results = response.json()
assert response.status_code == status.HTTP_200_OK, response.text
# assert we started with two flow runs from fixture
assert len(flow_runs_with_idempotency_key) == 2
# filtering the fixture should result in a single element
assert len(flow_run_filter_results) == 1
# make sure the idempotency key we're excluding is not included in the results
for result in flow_run_filter_results:
assert result["idempotency_key"] != idempotency_key_of_flow_run_to_exclude
# make sure the idempotency key we did not exclude is still in the results
assert flow_run_filter_results[0]["idempotency_key"] == str(
idempotency_key_of_flow_run_that_should_be_included
)
async def test_read_flow_runs_applies_task_run_filter(
self, flow, flow_runs, client, session
):
task_run_1 = await models.task_runs.create_task_run(
session=session,
task_run=schemas.actions.TaskRunCreate(
flow_run_id=flow_runs[1].id, task_key="my-key", dynamic_key="0"
),
)
await session.commit()
flow_run_filter = dict(
task_runs=schemas.filters.TaskRunFilter(
id=schemas.filters.TaskRunFilterId(any_=[task_run_1.id])
).model_dump(mode="json")
)
response = await client.post("/flow_runs/filter", json=flow_run_filter)
assert response.status_code == status.HTTP_200_OK, response.text
assert len(response.json()) == 1
assert response.json()[0]["id"] == str(flow_runs[1].id)
async def test_read_flow_runs_applies_work_pool_name_filter(
self, flow_runs, client, work_pool
):
work_pool_filter = dict(
work_pools=schemas.filters.WorkPoolFilter(
name=schemas.filters.WorkPoolFilterName(any_=[work_pool.name])
).model_dump(mode="json")
)
response = await client.post("/flow_runs/filter", json=work_pool_filter)
assert response.status_code == status.HTTP_200_OK, response.text
assert len(response.json()) == 1
assert response.json()[0]["id"] == str(flow_runs[2].id)
async def test_read_flow_runs_applies_work_queue_id_filter(
self,
flow_runs,
work_queue_1,
client,
):
work_pool_filter = dict(
work_pool_queues=schemas.filters.WorkQueueFilter(
id=schemas.filters.WorkQueueFilterId(any_=[work_queue_1.id])
).model_dump(mode="json")
)
response = await client.post("/flow_runs/filter", json=work_pool_filter)
assert response.status_code == status.HTTP_200_OK, response.text
assert len(response.json()) == 1
assert response.json()[0]["id"] == str(flow_runs[2].id)
async def test_read_flow_runs_multi_filter(self, flow, flow_runs, client):
flow_run_filter = dict(
flow_runs=dict(tags=dict(all_=["blue"])),
flows=dict(name=dict(any_=["another-test"])),
limit=1,
offset=0,
)
response = await client.post("/flow_runs/filter", json=flow_run_filter)
assert response.status_code == status.HTTP_200_OK, response.text
assert len(response.json()) == 1
assert response.json()[0]["id"] == str(flow_runs[2].id)
async def test_read_flow_runs_applies_limit(self, flow_runs, client):
response = await client.post("/flow_runs/filter", json=dict(limit=1))
assert response.status_code == status.HTTP_200_OK, response.text
assert len(response.json()) == 1
async def test_read_flow_runs_returns_empty_list(self, client):
response = await client.post("/flow_runs/filter")
assert response.status_code == status.HTTP_200_OK, response.text
assert response.json() == []
async def test_read_flow_runs_applies_sort(self, session, flow, client):
current_time = now("UTC")
flow_run_1 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
name="Flow Run 1",
state=schemas.states.State(
type=StateType.SCHEDULED,
timestamp=current_time - datetime.timedelta(minutes=1),
),
),
)
flow_run_2 = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
name="Flow Run 2",
state=schemas.states.State(
type=StateType.SCHEDULED,
timestamp=current_time + datetime.timedelta(minutes=1),
),
start_time=current_time - datetime.timedelta(minutes=2),
),
)
await session.commit()
response = await client.post(
"/flow_runs/filter",
json=dict(limit=1, sort=schemas.sorting.FlowRunSort.START_TIME_ASC.value),
)
assert response.status_code == status.HTTP_200_OK, response.text
assert response.json()[0]["id"] == str(flow_run_2.id)
response = await client.post(
"/flow_runs/filter",
json=dict(limit=1, sort=schemas.sorting.FlowRunSort.START_TIME_DESC.value),
)
assert response.status_code == status.HTTP_200_OK, response.text
assert response.json()[0]["id"] == str(flow_run_1.id)
response = await client.post(
"/flow_runs/filter",
json=dict(
limit=1, sort=schemas.sorting.FlowRunSort.EXPECTED_START_TIME_ASC.value
),
)
assert response.status_code == status.HTTP_200_OK, response.text
assert response.json()[0]["id"] == str(flow_run_1.id)
response = await client.post(
"/flow_runs/filter",
json=dict(
limit=1,
offset=1,
sort=schemas.sorting.FlowRunSort.EXPECTED_START_TIME_ASC.value,
),
)
assert response.status_code == status.HTTP_200_OK, response.text
assert response.json()[0]["id"] == str(flow_run_2.id)
response = await client.post(
"/flow_runs/filter",
json=dict(
limit=1, sort=schemas.sorting.FlowRunSort.EXPECTED_START_TIME_DESC.value
),
)
assert response.status_code == status.HTTP_200_OK, response.text
assert response.json()[0]["id"] == str(flow_run_2.id)
response = await client.post(
"/flow_runs/filter",
json=dict(
limit=1,
offset=1,
sort=schemas.sorting.FlowRunSort.EXPECTED_START_TIME_DESC.value,
),
)
assert response.status_code == status.HTTP_200_OK, response.text
assert response.json()[0]["id"] == str(flow_run_1.id)
response = await client.post(
"/flow_runs/filter",
json=dict(
limit=1,
sort=schemas.sorting.FlowRunSort.NAME_ASC.value,
),
)
assert response.status_code == status.HTTP_200_OK, response.text
assert response.json()[0]["id"] == str(flow_run_1.id)
response = await client.post(
"/flow_runs/filter",
json=dict(
limit=1,
sort=schemas.sorting.FlowRunSort.NAME_DESC.value,
),
)
assert response.status_code == status.HTTP_200_OK, response.text
assert response.json()[0]["id"] == str(flow_run_2.id)
@pytest.mark.parametrize(
"sort", [sort_option.value for sort_option in schemas.sorting.FlowRunSort]
)
async def test_read_flow_runs_sort_succeeds_for_all_sort_values(
self, sort, flow_run, client
):
response = await client.post("/flow_runs/filter", json=dict(sort=sort))
assert response.status_code == status.HTTP_200_OK, response.text
assert len(response.json()) == 1
assert response.json()[0]["id"] == str(flow_run.id)
@pytest.fixture
async def parent_flow_run(self, flow, session):
flow_run = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
flow_version="1.0",
state=schemas.states.Pending(),
),
)
await session.commit()
return flow_run
@pytest.fixture
async def child_runs(
self,
flow,
parent_flow_run,
session,
):
children = []
for i in range(5):
dummy_task = await models.task_runs.create_task_run(
session=session,
task_run=schemas.core.TaskRun(
flow_run_id=parent_flow_run.id,
name=f"dummy-{i}",
task_key=f"dummy-{i}",
dynamic_key=f"dummy-{i}",
),
)
children.append(
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
flow_version="1.0",
state=schemas.states.Pending(),
parent_task_run_id=dummy_task.id,
),
)
)
return children
@pytest.fixture
async def grandchild_runs(self, flow, child_runs, session):
grandchildren = []
for child in child_runs:
for i in range(3):
dummy_task = await models.task_runs.create_task_run(
session=session,
task_run=schemas.core.TaskRun(
flow_run_id=child.id,
name=f"dummy-{i}",
task_key=f"dummy-{i}",
dynamic_key=f"dummy-{i}",
),
)
grandchildren.append(
await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
flow_version="1.0",
state=schemas.states.Pending(),
parent_task_run_id=dummy_task.id,
),
)
)
return grandchildren
async def test_read_subflow_runs(
self,
client,
parent_flow_run,
child_runs,
# included to make sure we're only going 1 level deep
grandchild_runs,
# included to make sure we're not bringing in extra flow runs
flow_runs,
):
"""We should be able to find all subflow runs of a given flow run."""
subflow_filter = {
"flow_runs": schemas.filters.FlowRunFilter(
parent_flow_run_id=schemas.filters.FlowRunFilterParentFlowRunId(
any_=[parent_flow_run.id]
)
).model_dump(mode="json")
}
response = await client.post(
"/flow_runs/filter",
json=subflow_filter,
)
assert response.status_code == status.HTTP_200_OK, response.text
assert len(response.json()) == len(child_runs)
returned = {UUID(run["id"]) for run in response.json()}
expected = {run.id for run in child_runs}
assert returned == expected
async def test_read_subflow_runs_non_existant(
self,
client,
# including these to make sure we aren't bringing in extra flow runs
parent_flow_run,
child_runs,
grandchild_runs,
flow_runs,
):
subflow_filter = {
"flow_runs": schemas.filters.FlowRunFilter(
parent_flow_run_id=schemas.filters.FlowRunFilterParentFlowRunId(
any_=[uuid4()]
)
).model_dump(mode="json")
}
response = await client.post(
"/flow_runs/filter",
json=subflow_filter,
)
assert response.status_code == status.HTTP_200_OK, response.text
assert len(response.json()) == 0
@pytest.fixture
async def pending_run(self, flow, session):
flow_run = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
flow_version="1.0",
state=schemas.states.Pending(),
),
)
await session.commit()
return flow_run
@pytest.fixture
async def scheduled_run(self, flow, session):
flow_run = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
flow_version="1.0",
state=schemas.states.Scheduled(),
),
)
await session.commit()
return flow_run
async def test_read_flow_runs_filter_include_state_type(
self,
pending_run,
scheduled_run,
client,
):
# filter for pending runs
response = await client.post(
"/flow_runs/filter",
json={"flow_runs": {"state": {"type": {"any_": ["PENDING"]}}}},
)
assert response.status_code == 200
flow_runs = parse_obj_as(list[FlowRunResponse], response.json())
assert len(flow_runs) == 1
assert flow_runs[0].id == pending_run.id
# filter for scheduled runs
response = await client.post(
"/flow_runs/filter",
json={"flow_runs": {"state": {"type": {"any_": ["SCHEDULED"]}}}},
)
assert response.status_code == 200
flow_runs = parse_obj_as(list[FlowRunResponse], response.json())
assert len(flow_runs) == 1
assert flow_runs[0].id == scheduled_run.id
async def test_read_flow_runs_filter_exclude_state_type(
self,
pending_run,
scheduled_run,
client,
):
# exclude pending runs
response = await client.post(
"/flow_runs/filter",
json={"flow_runs": {"state": {"type": {"not_any_": ["PENDING"]}}}},
)
assert response.status_code == 200
flow_runs = parse_obj_as(list[FlowRunResponse], response.json())
assert len(flow_runs) == 1
assert flow_runs[0].id == scheduled_run.id
# exclude scheduled runs
response = await client.post(
"/flow_runs/filter",
json={"flow_runs": {"state": {"type": {"not_any_": ["SCHEDULED"]}}}},
)
assert response.status_code == 200
flow_runs = parse_obj_as(list[FlowRunResponse], response.json())
assert len(flow_runs) == 1
assert flow_runs[0].id == pending_run.id
async def test_read_flow_runs_filter_include_state_name(
self,
pending_run,
scheduled_run,
client,
):
# filter for pending runs
response = await client.post(
"/flow_runs/filter",
json={"flow_runs": {"state": {"name": {"any_": ["Pending"]}}}},
)
assert response.status_code == 200
flow_runs = parse_obj_as(list[FlowRunResponse], response.json())
assert len(flow_runs) == 1
assert flow_runs[0].id == pending_run.id
# filter for scheduled runs
response = await client.post(
"/flow_runs/filter",
json={"flow_runs": {"state": {"name": {"any_": ["Scheduled"]}}}},
)
assert response.status_code == 200
flow_runs = parse_obj_as(list[FlowRunResponse], response.json())
assert len(flow_runs) == 1
assert flow_runs[0].id == scheduled_run.id
async def test_read_flow_runs_filter_exclude_state_name(
self,
pending_run,
scheduled_run,
client,
):
# exclude pending runs
response = await client.post(
"/flow_runs/filter",
json={"flow_runs": {"state": {"name": {"not_any_": ["Pending"]}}}},
)
assert response.status_code == 200
flow_runs = parse_obj_as(list[FlowRunResponse], response.json())
assert len(flow_runs) == 1
assert flow_runs[0].id == scheduled_run.id
# exclude scheduled runs
response = await client.post(
"/flow_runs/filter",
json={"flow_runs": {"state": {"name": {"not_any_": ["Scheduled"]}}}},
)
assert response.status_code == 200
flow_runs = parse_obj_as(list[FlowRunResponse], response.json())
assert len(flow_runs) == 1
assert flow_runs[0].id == pending_run.id
| TestReadFlowRuns |
python | tornadoweb__tornado | tornado/test/ioloop_test.py | {
"start": 17419,
"end": 17894
} | class ____(AsyncTestCase):
def setUp(self):
super().setUp()
setup_with_context_manager(self, ignore_deprecation())
@gen_test
def test_clear_without_current(self):
# If there is no current IOLoop, clear_current is a no-op (but
# should not fail). Use a thread so we see the threading.Local
# in a pristine state.
with ThreadPoolExecutor(1) as e:
yield e.submit(IOLoop.clear_current)
| TestIOLoopCurrentAsync |
python | pytorch__pytorch | torch/onnx/_internal/fx/passes/type_promotion.py | {
"start": 1129,
"end": 1630
} | class ____:
"""Type promotion snapshot for a fx node and its inputs.
Contains the promoted dtype for args and kwargs that needs promoting.
Contains the expected node output dtype.
"""
args_dtypes: Mapping[int, torch.dtype]
"""Mapping from arg position to dtype to promote to."""
kwargs_dtypes: Mapping[str, torch.dtype]
"""Mapping from kwarg name to dtype to promote to."""
out_dtype: torch.dtype
"""Expected output dtype of the node."""
| TypePromotionSnapshot |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/escalation.py | {
"start": 3191,
"end": 6864
} | class ____:
# The `interesting_origin` is how Hypothesis distinguishes between multiple
# failures, for reporting and also to replay from the example database (even
# if report_multiple_bugs=False). We traditionally use the exception type and
# location, but have extracted this logic in order to see through `except ...:`
# blocks and understand the __cause__ (`raise x from y`) or __context__ that
# first raised an exception as well as PEP-654 exception groups.
exc_type: type[BaseException]
filename: str | None
lineno: int | None
context: "InterestingOrigin | tuple[()]"
group_elems: "tuple[InterestingOrigin, ...]"
def __str__(self) -> str:
ctx = ""
if self.context:
ctx = textwrap.indent(f"\ncontext: {self.context}", prefix=" ")
group = ""
if self.group_elems:
chunks = "\n ".join(str(x) for x in self.group_elems)
group = textwrap.indent(f"\nchild exceptions:\n {chunks}", prefix=" ")
return f"{self.exc_type.__name__} at {self.filename}:{self.lineno}{ctx}{group}"
@classmethod
def from_exception(
cls, exception: BaseException, /, seen: tuple[BaseException, ...] = ()
) -> "InterestingOrigin":
filename, lineno = None, None
if tb := get_trimmed_traceback(exception):
filename, lineno, *_ = traceback.extract_tb(tb)[-1]
seen = (*seen, exception)
make = partial(cls.from_exception, seen=seen)
context: InterestingOrigin | tuple[()] = ()
if exception.__context__ is not None and exception.__context__ not in seen:
context = make(exception.__context__)
return cls(
type(exception),
filename,
lineno,
# Note that if __cause__ is set it is always equal to __context__, explicitly
# to support introspection when debugging, so we can use that unconditionally.
context,
# We distinguish exception groups by the inner exceptions, as for __context__
(
tuple(make(exc) for exc in exception.exceptions if exc not in seen)
if isinstance(exception, BaseExceptionGroup)
else ()
),
)
current_pytest_item = DynamicVariable(None)
def _get_exceptioninfo():
# ExceptionInfo was moved to the top-level namespace in Pytest 7.0
if "pytest" in sys.modules:
with contextlib.suppress(Exception):
# From Pytest 7, __init__ warns on direct calls.
return sys.modules["pytest"].ExceptionInfo.from_exc_info
if "_pytest._code" in sys.modules: # old versions only
with contextlib.suppress(Exception):
return sys.modules["_pytest._code"].ExceptionInfo
return None # pragma: no cover # coverage tests always use pytest
def format_exception(err, tb):
# Try using Pytest to match the currently configured traceback style
ExceptionInfo = _get_exceptioninfo()
if current_pytest_item.value is not None and ExceptionInfo is not None:
item = current_pytest_item.value
return str(item.repr_failure(ExceptionInfo((type(err), err, tb)))) + "\n"
# Or use better_exceptions, if that's installed and enabled
if "better_exceptions" in sys.modules:
better_exceptions = sys.modules["better_exceptions"]
if sys.excepthook is better_exceptions.excepthook:
return "".join(better_exceptions.format_exception(type(err), err, tb))
# If all else fails, use the standard-library formatting tools
return "".join(traceback.format_exception(type(err), err, tb))
| InterestingOrigin |
python | langchain-ai__langchain | libs/standard-tests/langchain_tests/unit_tests/chat_models.py | {
"start": 6499,
"end": 34644
} | class ____(ChatModelTests):
'''Base class for chat model unit tests.
Test subclasses must implement the `chat_model_class` and
`chat_model_params` properties to specify what model to test and its
initialization parameters.
```python
from typing import Type
from langchain_tests.unit_tests import ChatModelUnitTests
from my_package.chat_models import MyChatModel
class TestMyChatModelUnit(ChatModelUnitTests):
@property
def chat_model_class(self) -> Type[MyChatModel]:
# Return the chat model class to test here
return MyChatModel
@property
def chat_model_params(self) -> dict:
# Return initialization parameters for the model.
return {"model": "model-001", "temperature": 0}
```
!!! note
API references for individual test methods include troubleshooting tips.
Test subclasses **must** implement the following two properties:
`chat_model_class`: The chat model class to test, e.g., `ChatParrotLink`.
```python
@property
def chat_model_class(self) -> Type[ChatParrotLink]:
return ChatParrotLink
```
`chat_model_params`: Initialization parameters for the chat model.
```python
@property
def chat_model_params(self) -> dict:
return {"model": "bird-brain-001", "temperature": 0}
```
In addition, test subclasses can control what features are tested (such as tool
calling or multi-modality) by selectively overriding the following properties.
Expand to see details:
??? info "`has_tool_calling`"
Boolean property indicating whether the chat model supports tool calling.
By default, this is determined by whether the chat model's `bind_tools` method
is overridden. It typically does not need to be overridden on the test class.
```python "Example override"
@property
def has_tool_calling(self) -> bool:
return True
```
??? info "`tool_choice_value`"
Value to use for tool choice when used in tests.
!!! warning
Deprecated since version 0.3.15.
This property will be removed in version 0.3.20. If a model does not
support forcing tool calling, override the `has_tool_choice` property to
return `False`. Otherwise, models should accept values of `'any'` or
the name of a tool in `tool_choice`.
```python
@property
def tool_choice_value(self) -> str | None:
return "any"
```
??? info "`has_tool_choice`"
Boolean property indicating whether the chat model supports forcing tool
calling via a `tool_choice` parameter.
By default, this is determined by whether the parameter is included in the
signature for the corresponding `bind_tools` method.
If `True`, the minimum requirement for this feature is that
`tool_choice="any"` will force a tool call, and `tool_choice=<tool name>`
will force a call to a specific tool.
```python "Example override"
@property
def has_tool_choice(self) -> bool:
return False
```
??? info "`has_structured_output`"
Boolean property indicating whether the chat model supports structured
output.
By default, this is determined by whether the chat model overrides the
`with_structured_output` or `bind_tools` methods. If the base
implementations are intended to be used, this method should be overridden.
See: https://docs.langchain.com/oss/python/langchain/structured-output
```python
@property
def has_structured_output(self) -> bool:
return True
```
??? info "`structured_output_kwargs`"
Dict property that can be used to specify additional kwargs for
`with_structured_output`.
Useful for testing different models.
```python
@property
def structured_output_kwargs(self) -> dict:
return {"method": "function_calling"}
```
??? info "`supports_json_mode`"
Boolean property indicating whether the chat model supports JSON mode in
`with_structured_output`.
See: https://docs.langchain.com/oss/python/langchain/structured-output
```python
@property
def supports_json_mode(self) -> bool:
return True
```
??? info "`supports_image_inputs`"
Boolean property indicating whether the chat model supports image inputs.
Defaults to `False`.
If set to `True`, the chat model will be tested using the LangChain
`ImageContentBlock` format:
```python
{
"type": "image",
"base64": "<base64 image data>",
"mime_type": "image/jpeg", # or appropriate MIME type
}
```
In addition to OpenAI Chat Completions `image_url` blocks:
```python
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
}
```
See https://docs.langchain.com/oss/python/langchain/models#multimodal
```python
@property
def supports_image_inputs(self) -> bool:
return True
```
??? info "`supports_image_urls`"
Boolean property indicating whether the chat model supports image inputs from
URLs.
Defaults to `False`.
If set to `True`, the chat model will be tested using content blocks of the
form.
```python
{
"type": "image",
"url": "https://...",
}
```
See https://docs.langchain.com/oss/python/langchain/models#multimodal
```python
@property
def supports_image_urls(self) -> bool:
return True
```
??? info "`supports_pdf_inputs`"
Boolean property indicating whether the chat model supports PDF inputs.
Defaults to `False`.
If set to `True`, the chat model will be tested using the LangChain
`FileContentBlock` format:
```python
{
"type": "file",
"base64": "<base64 file data>",
"mime_type": "application/pdf",
}
```
See https://docs.langchain.com/oss/python/langchain/models#multimodal
```python
@property
def supports_pdf_inputs(self) -> bool:
return True
```
??? info "`supports_audio_inputs`"
Boolean property indicating whether the chat model supports audio inputs.
Defaults to `False`.
If set to `True`, the chat model will be tested using the LangChain
`AudioContentBlock` format:
```python
{
"type": "audio",
"base64": "<base64 audio data>",
"mime_type": "audio/wav", # or appropriate MIME type
}
```
See https://docs.langchain.com/oss/python/langchain/models#multimodal
```python
@property
def supports_audio_inputs(self) -> bool:
return True
```
!!! warning
This test downloads audio data from wikimedia.org. You may need to set the
`LANGCHAIN_TESTS_USER_AGENT` environment variable to identify these tests,
e.g.,
```bash
export LANGCHAIN_TESTS_USER_AGENT="CoolBot/0.0 (https://example.org/coolbot/; coolbot@example.org) generic-library/0.0"
```
Refer to the [Wikimedia Foundation User-Agent Policy](https://foundation.wikimedia.org/wiki/Policy:Wikimedia_Foundation_User-Agent_Policy).
??? info "`supports_video_inputs`"
Boolean property indicating whether the chat model supports image inputs.
Defaults to `False`.
No current tests are written for this feature.
??? info "`returns_usage_metadata`"
Boolean property indicating whether the chat model returns usage metadata
on invoke and streaming responses.
Defaults to `True`.
`usage_metadata` is an optional dict attribute on `AIMessage` objects that track
input and output tokens.
[See more](https://reference.langchain.com/python/langchain_core/language_models/#langchain_core.messages.ai.UsageMetadata).
```python
@property
def returns_usage_metadata(self) -> bool:
return False
```
Models supporting `usage_metadata` should also return the name of the
underlying model in the `response_metadata` of the `AIMessage`.
??? info "`supports_anthropic_inputs`"
Boolean property indicating whether the chat model supports Anthropic-style
inputs.
These inputs might feature "tool use" and "tool result" content blocks, e.g.,
```python
[
{"type": "text", "text": "Hmm let me think about that"},
{
"type": "tool_use",
"input": {"fav_color": "green"},
"id": "foo",
"name": "color_picker",
},
]
```
If set to `True`, the chat model will be tested using content blocks of this
form.
```python
@property
def supports_anthropic_inputs(self) -> bool:
return False
```
??? info "`supports_image_tool_message`"
Boolean property indicating whether the chat model supports `ToolMessage`
objects that include image content, e.g.,
```python
ToolMessage(
content=[
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
},
],
tool_call_id="1",
name="random_image",
)
```
(OpenAI Chat Completions format), as well as LangChain's `ImageContentBlock`
format:
```python
ToolMessage(
content=[
{
"type": "image",
"base64": image_data,
"mime_type": "image/jpeg",
},
],
tool_call_id="1",
name="random_image",
)
```
(standard format).
If set to `True`, the chat model will be tested with message sequences that
include `ToolMessage` objects of this form.
```python
@property
def supports_image_tool_message(self) -> bool:
return False
```
??? info "`supports_pdf_tool_message`"
Boolean property indicating whether the chat model supports `ToolMessage`
objects that include PDF content, i.e.,
```python
ToolMessage(
content=[
{
"type": "file",
"base64": pdf_data,
"mime_type": "application/pdf",
},
],
tool_call_id="1",
name="random_pdf",
)
```
using LangChain's `FileContentBlock` format.
If set to `True`, the chat model will be tested with message sequences that
include `ToolMessage` objects of this form.
```python
@property
def supports_pdf_tool_message(self) -> bool:
return False
```
??? info "`supported_usage_metadata_details`"
Property controlling what usage metadata details are emitted in both `invoke`
and `stream`.
`usage_metadata` is an optional dict attribute on `AIMessage` objects that track
input and output tokens.
[See more](https://reference.langchain.com/python/langchain_core/language_models/#langchain_core.messages.ai.UsageMetadata).
It includes optional keys `input_token_details` and `output_token_details`
that can track usage details associated with special types of tokens, such as
cached, audio, or reasoning.
Only needs to be overridden if these details are supplied.
??? info "`enable_vcr_tests`"
Property controlling whether to enable select tests that rely on
[VCR](https://vcrpy.readthedocs.io/en/latest/) caching of HTTP calls, such
as benchmarking tests.
To enable these tests, follow these steps:
1. Override the `enable_vcr_tests` property to return `True`:
```python
@property
def enable_vcr_tests(self) -> bool:
return True
```
2. Configure VCR to exclude sensitive headers and other information from
cassettes.
!!! warning
VCR will by default record authentication headers and other sensitive
information in cassettes. Read below for how to configure what
information is recorded in cassettes.
To add configuration to VCR, add a `conftest.py` file to the `tests/`
directory and implement the `vcr_config` fixture there.
`langchain-tests` excludes the headers `'authorization'`,
`'x-api-key'`, and `'api-key'` from VCR cassettes. To pick up this
configuration, you will need to add `conftest.py` as shown below. You can
also exclude additional headers, override the default exclusions, or apply
other customizations to the VCR configuration. See example below:
```python title="tests/conftest.py"
import pytest
from langchain_tests.conftest import (
_base_vcr_config as _base_vcr_config,
)
_EXTRA_HEADERS = [
# Specify additional headers to redact
("user-agent", "PLACEHOLDER"),
]
def remove_response_headers(response: dict) -> dict:
# If desired, remove or modify headers in the response.
response["headers"] = {}
return response
@pytest.fixture(scope="session")
def vcr_config(_base_vcr_config: dict) -> dict: # noqa: F811
"""Extend the default configuration from langchain_tests."""
config = _base_vcr_config.copy()
config.setdefault("filter_headers", []).extend(_EXTRA_HEADERS)
config["before_record_response"] = remove_response_headers
return config
```
??? note "Compressing cassettes"
`langchain-tests` includes a custom VCR serializer that compresses
cassettes using gzip. To use it, register the `yaml.gz` serializer
to your VCR fixture and enable this serializer in the config. See
example below:
```python title="tests/conftest.py"
import pytest
from langchain_tests.conftest import (
CustomPersister,
CustomSerializer,
)
from langchain_tests.conftest import (
_base_vcr_config as _base_vcr_config,
)
from vcr import VCR
_EXTRA_HEADERS = [
# Specify additional headers to redact
("user-agent", "PLACEHOLDER"),
]
def remove_response_headers(response: dict) -> dict:
# If desired, remove or modify headers in the response.
response["headers"] = {}
return response
@pytest.fixture(scope="session")
def vcr_config(_base_vcr_config: dict) -> dict: # noqa: F811
"""Extend the default configuration from langchain_tests."""
config = _base_vcr_config.copy()
config.setdefault("filter_headers", []).extend(_EXTRA_HEADERS)
config["before_record_response"] = remove_response_headers
# New: enable serializer and set file extension
config["serializer"] = "yaml.gz"
config["path_transformer"] = VCR.ensure_suffix(".yaml.gz")
return config
def pytest_recording_configure(config: dict, vcr: VCR) -> None:
vcr.register_persister(CustomPersister())
vcr.register_serializer("yaml.gz", CustomSerializer())
```
You can inspect the contents of the compressed cassettes (e.g., to
ensure no sensitive information is recorded) using
```bash
gunzip -k /path/to/tests/cassettes/TestClass_test.yaml.gz
```
...or by using the serializer:
```python
from langchain_tests.conftest import (
CustomPersister,
CustomSerializer,
)
cassette_path = "/path/to/tests/cassettes/TestClass_test.yaml.gz"
requests, responses = CustomPersister().load_cassette(
path, CustomSerializer()
)
```
3. Run tests to generate VCR cassettes.
Example:
```bash
uv run python -m pytest tests/integration_tests/test_chat_models.py::TestMyModel::test_stream_time
```
This will generate a VCR cassette for the test in
`tests/integration_tests/cassettes/`.
!!! warning
You should inspect the generated cassette to ensure that it does not
contain sensitive information. If it does, you can modify the
`vcr_config` fixture to exclude headers or modify the response
before it is recorded.
You can then commit the cassette to your repository. Subsequent test runs
will use the cassette instead of making HTTP calls.
**Testing initialization from environment variables**
Some unit tests may require testing initialization from environment variables.
These tests can be enabled by overriding the `init_from_env_params`
property (see below).
??? info "`init_from_env_params`"
This property is used in unit tests to test initialization from
environment variables. It should return a tuple of three dictionaries
that specify the environment variables, additional initialization args,
and expected instance attributes to check.
Defaults to empty dicts. If not overridden, the test is skipped.
Example:
```python
@property
def init_from_env_params(self) -> Tuple[dict, dict, dict]:
return (
{
"MY_API_KEY": "api_key",
},
{
"model": "bird-brain-001",
},
{
"my_api_key": "api_key",
},
)
```
''' # noqa: E501,D214
@property
def standard_chat_model_params(self) -> dict:
"""Standard chat model parameters."""
params = super().standard_chat_model_params
params["api_key"] = "test"
return params
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
"""Init from env params.
Environment variables, additional initialization args, and expected instance
attributes for testing initialization from environment variables.
"""
return {}, {}, {}
def test_init(self) -> None:
"""Test model initialization. This should pass for all integrations.
??? question "Troubleshooting"
If this test fails, ensure that:
1. `chat_model_params` is specified and the model can be initialized
from those params;
2. The model accommodates
[standard parameters](https://python.langchain.com/docs/concepts/chat_models/#standard-parameters).
"""
model = self.chat_model_class(
**{
**self.standard_chat_model_params,
**self.chat_model_params,
}
)
assert model is not None
def test_init_from_env(self) -> None:
"""Test initialization from environment variables.
Relies on the `init_from_env_params` property. Test is skipped if that
property is not set.
??? question "Troubleshooting"
If this test fails, ensure that `init_from_env_params` is specified
correctly and that model parameters are properly set from environment
variables during initialization.
"""
env_params, model_params, expected_attrs = self.init_from_env_params
if not env_params:
pytest.skip("init_from_env_params not specified.")
else:
with mock.patch.dict(os.environ, env_params):
model = self.chat_model_class(**model_params)
assert model is not None
for k, expected in expected_attrs.items():
actual = getattr(model, k)
if isinstance(actual, SecretStr):
actual = actual.get_secret_value()
assert actual == expected
def test_init_streaming(
self,
) -> None:
"""Test that model can be initialized with `streaming=True`.
This is for backward-compatibility purposes.
??? question "Troubleshooting"
If this test fails, ensure that the model can be initialized with a
boolean `streaming` parameter.
"""
model = self.chat_model_class(
**{
**self.standard_chat_model_params,
**self.chat_model_params,
"streaming": True,
}
)
assert model is not None
def test_bind_tool_pydantic(
self,
model: BaseChatModel,
my_adder_tool: BaseTool,
) -> None:
"""Test bind tools with Pydantic models.
Test that chat model correctly handles Pydantic models that are passed
into `bind_tools`. Test is skipped if the `has_tool_calling` property
on the test class is False.
??? question "Troubleshooting"
If this test fails, ensure that the model's `bind_tools` method
properly handles Pydantic V2 models. `langchain_core` implements
a utility function that will accommodate most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html
See example implementation of `bind_tools` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.bind_tools
"""
if not self.has_tool_calling:
return
def my_adder(a: int, b: int) -> int:
"""Return the sum of two integers."""
return a + b
tools = [my_adder_tool, my_adder]
for pydantic_model in TEST_PYDANTIC_MODELS:
model_schema = (
pydantic_model.model_json_schema()
if hasattr(pydantic_model, "model_json_schema")
else pydantic_model.schema()
)
tools.extend([pydantic_model, model_schema])
# Doing a mypy ignore here since some of the tools are from pydantic
# BaseModel 2 which isn't typed properly yet. This will need to be fixed
# so type checking does not become annoying to users.
tool_model = model.bind_tools(tools, tool_choice="any") # type: ignore[arg-type]
assert isinstance(tool_model, RunnableBinding)
@pytest.mark.parametrize("schema", TEST_PYDANTIC_MODELS)
def test_with_structured_output(
self,
model: BaseChatModel,
schema: Any,
) -> None:
"""Test `with_structured_output` method.
Test is skipped if the `has_structured_output` property on the test class is
False.
??? question "Troubleshooting"
If this test fails, ensure that the model's `bind_tools` method
properly handles Pydantic V2 models. `langchain_core` implements
a utility function that will accommodate most formats: https://python.langchain.com/api_reference/core/utils/langchain_core.utils.function_calling.convert_to_openai_tool.html
See example implementation of `with_structured_output` here: https://python.langchain.com/api_reference/_modules/langchain_openai/chat_models/base.html#BaseChatOpenAI.with_structured_output
"""
if not self.has_structured_output:
return
assert model.with_structured_output(schema) is not None
for method in ["json_schema", "function_calling", "json_mode"]:
strict_values = [None, False, True] if method != "json_mode" else [None]
for strict in strict_values:
assert model.with_structured_output(
schema, method=method, strict=strict
)
def test_standard_params(self, model: BaseChatModel) -> None:
"""Test that model properly generates standard parameters.
These are used for tracing purposes.
??? question "Troubleshooting"
If this test fails, check that the model accommodates [standard parameters](https://python.langchain.com/docs/concepts/chat_models/#standard-parameters).
Check also that the model class is named according to convention
(e.g., `ChatProviderName`).
"""
class ExpectedParams(BaseModel):
ls_provider: str
ls_model_name: str
ls_model_type: Literal["chat"]
ls_temperature: float | None = None
ls_max_tokens: int | None = None
ls_stop: list[str] | None = None
ls_params = model._get_ls_params()
try:
ExpectedParams(**ls_params)
except ValidationError as e:
pytest.fail(f"Validation error: {e}")
# Test optional params
model = self.chat_model_class(
max_tokens=10,
stop=["test"],
**self.chat_model_params,
)
ls_params = model._get_ls_params()
try:
ExpectedParams(**ls_params)
except ValidationError as e:
pytest.fail(f"Validation error: {e}")
def test_serdes(self, model: BaseChatModel, snapshot: SnapshotAssertion) -> None:
"""Test serialization and deserialization of the model.
Test is skipped if the `is_lc_serializable` property on the chat model class
is not overwritten to return `True`.
??? question "Troubleshooting"
If this test fails, check that the `init_from_env_params` property is
correctly set on the test class.
"""
if not self.chat_model_class.is_lc_serializable():
pytest.skip("Model is not serializable.")
else:
env_params, _model_params, _expected_attrs = self.init_from_env_params
with mock.patch.dict(os.environ, env_params):
ser = dumpd(model)
assert ser == snapshot(name="serialized")
assert (
model.dict()
== load(
dumpd(model), valid_namespaces=model.get_lc_namespace()[:1]
).dict()
)
@pytest.mark.benchmark
def test_init_time(self, benchmark: BenchmarkFixture) -> None:
"""Test initialization time of the chat model.
If this test fails, check that
we are not introducing undue overhead in the model's initialization.
"""
def _init_in_loop() -> None:
for _ in range(10):
self.chat_model_class(**self.chat_model_params)
benchmark(_init_in_loop)
| ChatModelUnitTests |
python | django__django | tests/utils_tests/test_autoreload.py | {
"start": 13666,
"end": 15048
} | class ____(SimpleTestCase):
@mock.patch.dict(os.environ, {autoreload.DJANGO_AUTORELOAD_ENV: "true"})
@mock.patch("django.utils.autoreload.get_reloader")
def test_swallows_keyboard_interrupt(self, mocked_get_reloader):
mocked_get_reloader.side_effect = KeyboardInterrupt()
autoreload.run_with_reloader(lambda: None) # No exception
@mock.patch.dict(os.environ, {autoreload.DJANGO_AUTORELOAD_ENV: "false"})
@mock.patch("django.utils.autoreload.restart_with_reloader")
def test_calls_sys_exit(self, mocked_restart_reloader):
mocked_restart_reloader.return_value = 1
with self.assertRaises(SystemExit) as exc:
autoreload.run_with_reloader(lambda: None)
self.assertEqual(exc.exception.code, 1)
@mock.patch.dict(os.environ, {autoreload.DJANGO_AUTORELOAD_ENV: "true"})
@mock.patch("django.utils.autoreload.start_django")
@mock.patch("django.utils.autoreload.get_reloader")
def test_calls_start_django(self, mocked_reloader, mocked_start_django):
mocked_reloader.return_value = mock.sentinel.RELOADER
autoreload.run_with_reloader(mock.sentinel.METHOD)
self.assertEqual(mocked_start_django.call_count, 1)
self.assertSequenceEqual(
mocked_start_django.call_args[0],
[mock.sentinel.RELOADER, mock.sentinel.METHOD],
)
| RunWithReloaderTests |
python | ray-project__ray | rllib/utils/metrics/learner_info.py | {
"start": 545,
"end": 4441
} | class ____:
def __init__(self, num_devices: int = 1):
self.num_devices = num_devices
self.results_all_towers = defaultdict(list)
self.is_finalized = False
def add_learn_on_batch_results(
self,
results: Dict,
policy_id: PolicyID = DEFAULT_POLICY_ID,
) -> None:
"""Adds a policy.learn_on_(loaded)?_batch() result to this builder.
Args:
results: The results returned by Policy.learn_on_batch or
Policy.learn_on_loaded_batch.
policy_id: The policy's ID, whose learn_on_(loaded)_batch method
returned `results`.
"""
assert (
not self.is_finalized
), "LearnerInfo already finalized! Cannot add more results."
# No towers: Single CPU.
if "tower_0" not in results:
self.results_all_towers[policy_id].append(results)
# Multi-GPU case:
else:
self.results_all_towers[policy_id].append(
tree.map_structure_with_path(
lambda p, *s: _all_tower_reduce(p, *s),
*(
results.pop("tower_{}".format(tower_num))
for tower_num in range(self.num_devices)
)
)
)
for k, v in results.items():
if k == LEARNER_STATS_KEY:
for k1, v1 in results[k].items():
self.results_all_towers[policy_id][-1][LEARNER_STATS_KEY][
k1
] = v1
else:
self.results_all_towers[policy_id][-1][k] = v
def add_learn_on_batch_results_multi_agent(
self,
all_policies_results: Dict,
) -> None:
"""Adds multiple policy.learn_on_(loaded)?_batch() results to this builder.
Args:
all_policies_results: The results returned by all Policy.learn_on_batch or
Policy.learn_on_loaded_batch wrapped as a dict mapping policy ID to
results.
"""
for pid, result in all_policies_results.items():
if pid != "batch_count":
self.add_learn_on_batch_results(result, policy_id=pid)
def finalize(self):
self.is_finalized = True
info = {}
for policy_id, results_all_towers in self.results_all_towers.items():
# Reduce mean across all minibatch SGD steps (axis=0 to keep
# all shapes as-is).
info[policy_id] = tree.map_structure_with_path(
_all_tower_reduce, *results_all_towers
)
return info
@OldAPIStack
def _all_tower_reduce(path, *tower_data):
"""Reduces stats across towers based on their stats-dict paths."""
# TD-errors: Need to stay per batch item in order to be able to update
# each item's weight in a prioritized replay buffer.
if len(path) == 1 and path[0] == "td_error":
return np.concatenate(tower_data, axis=0)
elif tower_data[0] is None:
return None
if isinstance(path[-1], str):
# TODO(sven): We need to fix this terrible dependency on `str.starts_with`
# for determining, how to aggregate these stats! As "num_..." might
# be a good indicator for summing, it will fail if the stats is e.g.
# `num_samples_per_sec" :)
# Counter stats: Reduce sum.
# if path[-1].startswith("num_"):
# return np.nansum(tower_data)
# Min stats: Reduce min.
if path[-1].startswith("min_"):
return np.nanmin(tower_data)
# Max stats: Reduce max.
elif path[-1].startswith("max_"):
return np.nanmax(tower_data)
if np.isnan(tower_data).all():
return np.nan
# Everything else: Reduce mean.
return np.nanmean(tower_data)
| LearnerInfoBuilder |
python | pandas-dev__pandas | pandas/tests/tseries/offsets/test_easter.py | {
"start": 319,
"end": 4675
} | class ____:
@pytest.mark.parametrize(
"offset,date,expected",
[
(Easter(), datetime(2010, 1, 1), datetime(2010, 4, 4)),
(Easter(), datetime(2010, 4, 5), datetime(2011, 4, 24)),
(Easter(2), datetime(2010, 1, 1), datetime(2011, 4, 24)),
(Easter(), datetime(2010, 4, 4), datetime(2011, 4, 24)),
(Easter(2), datetime(2010, 4, 4), datetime(2012, 4, 8)),
(-Easter(), datetime(2011, 1, 1), datetime(2010, 4, 4)),
(-Easter(), datetime(2010, 4, 5), datetime(2010, 4, 4)),
(-Easter(2), datetime(2011, 1, 1), datetime(2009, 4, 12)),
(-Easter(), datetime(2010, 4, 4), datetime(2009, 4, 12)),
(-Easter(2), datetime(2010, 4, 4), datetime(2008, 3, 23)),
],
)
def test_offset(self, offset, date, expected):
assert_offset_equal(offset, date, expected)
@pytest.mark.parametrize(
"offset,date,expected",
[
(Easter(method=EASTER_WESTERN), datetime(2010, 1, 1), datetime(2010, 4, 4)),
(
Easter(method=EASTER_WESTERN),
datetime(2010, 4, 5),
datetime(2011, 4, 24),
),
(
Easter(2, method=EASTER_WESTERN),
datetime(2010, 1, 1),
datetime(2011, 4, 24),
),
(
Easter(method=EASTER_WESTERN),
datetime(2010, 4, 4),
datetime(2011, 4, 24),
),
(
Easter(2, method=EASTER_WESTERN),
datetime(2010, 4, 4),
datetime(2012, 4, 8),
),
(
-Easter(method=EASTER_WESTERN),
datetime(2011, 1, 1),
datetime(2010, 4, 4),
),
(
-Easter(method=EASTER_WESTERN),
datetime(2010, 4, 5),
datetime(2010, 4, 4),
),
(
-Easter(2, method=EASTER_WESTERN),
datetime(2011, 1, 1),
datetime(2009, 4, 12),
),
(
-Easter(method=EASTER_WESTERN),
datetime(2010, 4, 4),
datetime(2009, 4, 12),
),
(
-Easter(2, method=EASTER_WESTERN),
datetime(2010, 4, 4),
datetime(2008, 3, 23),
),
],
)
def test_western_easter_offset(self, offset, date, expected):
assert_offset_equal(offset, date, expected)
@pytest.mark.parametrize(
"offset,date,expected",
[
(
Easter(method=EASTER_ORTHODOX),
datetime(2010, 1, 1),
datetime(2010, 4, 4),
),
(
Easter(method=EASTER_ORTHODOX),
datetime(2010, 4, 5),
datetime(2011, 4, 24),
),
(
Easter(2, method=EASTER_ORTHODOX),
datetime(2010, 1, 1),
datetime(2011, 4, 24),
),
(
Easter(method=EASTER_ORTHODOX),
datetime(2010, 4, 4),
datetime(2011, 4, 24),
),
(
Easter(2, method=EASTER_ORTHODOX),
datetime(2010, 4, 4),
datetime(2012, 4, 15),
),
(
-Easter(method=EASTER_ORTHODOX),
datetime(2011, 1, 1),
datetime(2010, 4, 4),
),
(
-Easter(method=EASTER_ORTHODOX),
datetime(2010, 4, 5),
datetime(2010, 4, 4),
),
(
-Easter(2, method=EASTER_ORTHODOX),
datetime(2011, 1, 1),
datetime(2009, 4, 19),
),
(
-Easter(method=EASTER_ORTHODOX),
datetime(2010, 4, 4),
datetime(2009, 4, 19),
),
(
-Easter(2, method=EASTER_ORTHODOX),
datetime(2010, 4, 4),
datetime(2008, 4, 27),
),
],
)
def test_orthodox_easter_offset(self, offset, date, expected):
assert_offset_equal(offset, date, expected)
| TestEaster |
python | ipython__ipython | docs/autogen_shortcuts.py | {
"start": 710,
"end": 779
} | class ____:
description: str
identifier: str
@dataclass
| Handler |
python | fastapi__sqlmodel | docs_src/tutorial/fastapi/read_one/tutorial001_py310.py | {
"start": 385,
"end": 1527
} | class ____(HeroBase):
id: int
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, echo=True, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/", response_model=HeroPublic)
def create_hero(hero: HeroCreate):
with Session(engine) as session:
db_hero = Hero.model_validate(hero)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.get("/heroes/", response_model=list[HeroPublic])
def read_heroes():
with Session(engine) as session:
heroes = session.exec(select(Hero)).all()
return heroes
@app.get("/heroes/{hero_id}", response_model=HeroPublic)
def read_hero(hero_id: int):
with Session(engine) as session:
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
return hero
| HeroPublic |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 12982,
"end": 13078
} | class ____(torch.nn.Module):
def forward(self, x):
return 1.5 * torch.cat(x, 1)
| _Block |
python | realpython__materials | python-enum/sort.py | {
"start": 24,
"end": 235
} | class ____(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
# Sort by name
sorted(Season, key=lambda season: season.name)
# Sort by value
sorted(Season, key=lambda season: season.value)
| Season |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/emr.py | {
"start": 10461,
"end": 13597
} | class ____(AwsBaseHook):
"""
Interact with Amazon EMR Serverless.
Provide thin wrapper around :py:class:`boto3.client("emr-serverless") <EMRServerless.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
JOB_INTERMEDIATE_STATES = {"PENDING", "RUNNING", "SCHEDULED", "SUBMITTED"}
JOB_FAILURE_STATES = {"FAILED", "CANCELLING", "CANCELLED"}
JOB_SUCCESS_STATES = {"SUCCESS"}
JOB_TERMINAL_STATES = JOB_SUCCESS_STATES.union(JOB_FAILURE_STATES)
APPLICATION_INTERMEDIATE_STATES = {"CREATING", "STARTING", "STOPPING"}
APPLICATION_FAILURE_STATES = {"STOPPED", "TERMINATED"}
APPLICATION_SUCCESS_STATES = {"CREATED", "STARTED"}
def __init__(self, *args: Any, **kwargs: Any) -> None:
kwargs["client_type"] = "emr-serverless"
super().__init__(*args, **kwargs)
def cancel_running_jobs(
self, application_id: str, waiter_config: dict | None = None, wait_for_completion: bool = True
) -> int:
"""
Cancel jobs in an intermediate state, and return the number of cancelled jobs.
If wait_for_completion is True, then the method will wait until all jobs are
cancelled before returning.
Note: if new jobs are triggered while this operation is ongoing,
it's going to time out and return an error.
"""
paginator = self.conn.get_paginator("list_job_runs")
results_per_response = 50
iterator = paginator.paginate(
applicationId=application_id,
states=list(self.JOB_INTERMEDIATE_STATES),
PaginationConfig={
"PageSize": results_per_response,
},
)
count = 0
for r in iterator:
job_ids = [jr["id"] for jr in r["jobRuns"]]
count += len(job_ids)
if job_ids:
self.log.info(
"Cancelling %s pending job(s) for the application %s so that it can be stopped",
len(job_ids),
application_id,
)
for job_id in job_ids:
self.conn.cancel_job_run(applicationId=application_id, jobRunId=job_id)
if wait_for_completion:
if count > 0:
self.log.info("now waiting for the %s cancelled job(s) to terminate", count)
self.get_waiter("no_job_running").wait(
applicationId=application_id,
states=list(self.JOB_INTERMEDIATE_STATES.union({"CANCELLING"})),
WaiterConfig=waiter_config or {},
)
return count
def is_connection_being_updated_exception(exception: BaseException) -> bool:
return (
isinstance(exception, ClientError)
and exception.response["Error"]["Code"] == "ValidationException"
and "is not reachable as its connection is currently being updated"
in exception.response["Error"]["Message"]
)
| EmrServerlessHook |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 167646,
"end": 168828
} | class ____(GeneratedAirbyteSource):
@public
def __init__(
self,
name: str,
secret_key: str,
start_date: str,
lookback_window_days: Optional[int] = None,
):
r"""Airbyte Source for Paystack.
Documentation can be found at https://docs.airbyte.com/integrations/sources/paystack
Args:
name (str): The name of the destination.
secret_key (str): The Paystack API key (usually starts with 'sk_live\\_'; find yours here).
start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
lookback_window_days (Optional[int]): When set, the connector will always reload data from the past N days, where N is the value set here. This is useful if your data is updated after creation.
"""
self.secret_key = check.str_param(secret_key, "secret_key")
self.start_date = check.str_param(start_date, "start_date")
self.lookback_window_days = check.opt_int_param(
lookback_window_days, "lookback_window_days"
)
super().__init__("Paystack", name)
| PaystackSource |
python | ansible__ansible | test/integration/targets/ansible-test-container/runme.py | {
"start": 38376,
"end": 39572
} | class ____(Bootstrapper):
"""Bootstrapper for apt based systems."""
@classmethod
def install_podman(cls) -> bool:
"""Return True if podman will be installed."""
return True
@classmethod
def install_docker(cls) -> bool:
"""Return True if docker will be installed."""
return True
@classmethod
def usable(cls) -> bool:
"""Return True if the bootstrapper can be used, otherwise False."""
return bool(shutil.which('apt-get'))
@classmethod
def run(cls) -> None:
"""Run the bootstrapper."""
apt_env = os.environ.copy()
apt_env.update(
DEBIAN_FRONTEND='noninteractive',
)
packages = ['docker.io']
if cls.install_podman():
# NOTE: Install crun to make it available to podman, otherwise installing docker.io can cause podman to use runc instead.
# Using podman rootless requires the `newuidmap` and `slirp4netns` commands.
packages.extend(('podman', 'crun', 'uidmap', 'slirp4netns'))
run_command('apt-get', 'install', *packages, '-y', '--no-install-recommends', env=apt_env)
super().run()
| AptBootstrapper |
python | google__flatbuffers | python/flatbuffers/table.py | {
"start": 652,
"end": 4818
} | class ____(object):
"""Table wraps a byte slice and provides read access to its data.
The variable `Pos` indicates the root of the FlatBuffers object therein.
"""
__slots__ = ("Bytes", "Pos")
def __init__(self, buf, pos):
N.enforce_number(pos, N.UOffsetTFlags)
self.Bytes = buf
self.Pos = pos
def Offset(self, vtableOffset):
"""Offset provides access into the Table's vtable.
Deprecated fields are ignored by checking the vtable's length.
"""
vtable = self.Pos - self.Get(N.SOffsetTFlags, self.Pos)
vtableEnd = self.Get(N.VOffsetTFlags, vtable)
if vtableOffset < vtableEnd:
return self.Get(N.VOffsetTFlags, vtable + vtableOffset)
return 0
def Indirect(self, off):
"""Indirect retrieves the relative offset stored at `offset`."""
N.enforce_number(off, N.UOffsetTFlags)
return off + encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
def String(self, off):
"""String gets a string from data stored inside the flatbuffer."""
N.enforce_number(off, N.UOffsetTFlags)
off += encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
start = off + N.UOffsetTFlags.bytewidth
length = encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
return bytes(self.Bytes[start : start + length])
def VectorLen(self, off):
"""VectorLen retrieves the length of the vector whose offset is stored
at "off" in this object.
"""
N.enforce_number(off, N.UOffsetTFlags)
off += self.Pos
off += encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
ret = encode.Get(N.UOffsetTFlags.packer_type, self.Bytes, off)
return ret
def Vector(self, off):
"""Vector retrieves the start of data of the vector whose offset is
stored at "off" in this object.
"""
N.enforce_number(off, N.UOffsetTFlags)
off += self.Pos
x = off + self.Get(N.UOffsetTFlags, off)
# data starts after metadata containing the vector length
x += N.UOffsetTFlags.bytewidth
return x
def Union(self, t2, off):
"""Union initializes any Table-derived type to point to the union at
the given offset.
"""
assert type(t2) is Table
N.enforce_number(off, N.UOffsetTFlags)
off += self.Pos
t2.Pos = off + self.Get(N.UOffsetTFlags, off)
t2.Bytes = self.Bytes
def Get(self, flags, off):
"""Get retrieves a value of the type specified by `flags` at the
given offset.
"""
N.enforce_number(off, N.UOffsetTFlags)
return flags.py_type(encode.Get(flags.packer_type, self.Bytes, off))
def GetSlot(self, slot, d, validator_flags):
N.enforce_number(slot, N.VOffsetTFlags)
if validator_flags is not None:
N.enforce_number(d, validator_flags)
off = self.Offset(slot)
if off == 0:
return d
return self.Get(validator_flags, self.Pos + off)
def GetVectorAsNumpy(self, flags, off):
"""GetVectorAsNumpy returns the vector that starts at `Vector(off)`
as a numpy array with the type specified by `flags`. The array is
a `view` into Bytes, so modifying the returned array will
modify Bytes in place.
"""
offset = self.Vector(off)
length = self.VectorLen(off) # TODO: length accounts for bytewidth, right?
numpy_dtype = N.to_numpy_type(flags)
return encode.GetVectorAsNumpy(numpy_dtype, self.Bytes, length, offset)
def GetArrayAsNumpy(self, flags, off, length):
"""GetArrayAsNumpy returns the array with fixed width that starts at `Vector(offset)`
with length `length` as a numpy array with the type specified by `flags`.
The
array is a `view` into Bytes so modifying the returned will modify Bytes in
place.
"""
numpy_dtype = N.to_numpy_type(flags)
return encode.GetVectorAsNumpy(numpy_dtype, self.Bytes, length, off)
def GetVOffsetTSlot(self, slot, d):
"""GetVOffsetTSlot retrieves the VOffsetT that the given vtable location
points to. If the vtable value is zero, the default value `d`
will be returned.
"""
N.enforce_number(slot, N.VOffsetTFlags)
N.enforce_number(d, N.VOffsetTFlags)
off = self.Offset(slot)
if off == 0:
return d
return off
| Table |
python | pypa__pip | tests/lib/__init__.py | {
"start": 3630,
"end": 5952
} | class ____:
"""
Represents a bundle of pre-created test data.
This copies a pristine set of test data into a root location that is
designed to be test specific. The reason for this is when running the tests
concurrently errors can be generated because the related tooling uses
the directory as a work space. This leads to two concurrent processes
trampling over each other. This class gets around that by copying all
data into a directory and operating on the copied data.
"""
__test__ = False
def __init__(
self,
root: pathlib.Path,
source: pathlib.Path | None = None,
) -> None:
self.source = source or DATA_DIR
self.root = root.resolve()
@classmethod
def copy(cls, root: pathlib.Path) -> TestData:
obj = cls(root)
obj.reset()
return obj
def reset(self) -> None:
# Check explicitly for the target directory to avoid overly-broad
# try/except.
if self.root.exists():
shutil.rmtree(self.root)
shutil.copytree(self.source, self.root, symlinks=True)
@property
def packages(self) -> pathlib.Path:
return self.root.joinpath("packages")
@property
def packages2(self) -> pathlib.Path:
return self.root.joinpath("packages2")
@property
def packages3(self) -> pathlib.Path:
return self.root.joinpath("packages3")
@property
def src(self) -> pathlib.Path:
return self.root.joinpath("src")
@property
def indexes(self) -> pathlib.Path:
return self.root.joinpath("indexes")
@property
def reqfiles(self) -> pathlib.Path:
return self.root.joinpath("reqfiles")
@property
def completion_paths(self) -> pathlib.Path:
return self.root.joinpath("completion_paths")
@property
def find_links(self) -> str:
return self.packages.as_uri()
@property
def find_links2(self) -> str:
return self.packages2.as_uri()
@property
def find_links3(self) -> str:
return self.packages3.as_uri()
@property
def backends(self) -> str:
return self.root.joinpath("backends").as_uri()
def index_url(self, index: str = "simple") -> str:
return self.root.joinpath("indexes", index).as_uri()
| TestData |
python | huggingface__transformers | src/transformers/generation/logits_process.py | {
"start": 18818,
"end": 21539
} | class ____(LogitsProcessor):
r"""
[`LogitsProcessor`] that works similarly to [`RepetitionPenaltyLogitsProcessor`], but with an *inverse* penalty
that is applied to the tokens present in the prompt. In other words, a penalty above 1.0 increases the odds of
selecting tokens that were present in the prompt.
It was designed to avoid hallucination in input-grounded tasks, like summarization. Although originally intended
for encoder-decoder models, it can also be used with decoder-only models like LLMs.
Args:
penalty (`float`):
The parameter for repetition penalty. 1.0 means no penalty. Above 1.0 rewards prompt tokens. Between 0.0
and 1.0 penalizes prompt tokens.
encoder_input_ids (`torch.LongTensor`):
The encoder_input_ids that should be repeated within the decoder ids.
Examples:
```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m")
>>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m")
>>> inputs = tokenizer(["Alice and Bob. The third member's name was"], return_tensors="pt")
>>> gen_out = model.generate(**inputs)
>>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])
Alice and Bob. The third member's name was not mentioned.
>>> # With the `encoder_repetition_penalty` argument we can trigger this logits processor in `generate`, which can
>>> # promote the use of prompt tokens ("Bob" in this example)
>>> gen_out = model.generate(**inputs, encoder_repetition_penalty=1.2)
>>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])
Alice and Bob. The third member's name was Bob. The third member's name was Bob.
```
"""
def __init__(self, penalty: float, encoder_input_ids: torch.LongTensor):
if not isinstance(penalty, float) or not (penalty > 0):
raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")
self.penalty = 1 / penalty
self.encoder_input_ids = encoder_input_ids
@add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
score = torch.gather(scores, 1, self.encoder_input_ids)
# if score < 0 then hallucination penalty has to be multiplied to increase the token probabilities
score = torch.where(score < 0, score * self.penalty, score / self.penalty)
scores_processed = scores.scatter(1, self.encoder_input_ids, score)
return scores_processed
| EncoderRepetitionPenaltyLogitsProcessor |
python | pypa__setuptools | setuptools/_vendor/importlib_metadata/_collections.py | {
"start": 51,
"end": 586
} | class ____(collections.defaultdict):
"""
Often it is desirable to prevent the mutation of
a default dict after its initial construction, such
as to prevent mutation during iteration.
>>> dd = FreezableDefaultDict(list)
>>> dd[0].append('1')
>>> dd.freeze()
>>> dd[1]
[]
>>> len(dd)
1
"""
def __missing__(self, key):
return getattr(self, '_frozen', super().__missing__)(key)
def freeze(self):
self._frozen = lambda key: self.default_factory()
| FreezableDefaultDict |
python | PrefectHQ__prefect | tests/server/utilities/test_schemas.py | {
"start": 1524,
"end": 2399
} | class ____:
def test_extra_attributes_are_allowed_during_unit_tests(self):
class Model(PrefectBaseModel):
x: int
Model(x=1, y=2)
@pytest.mark.parametrize("falsey_value", ["0", "False", "", None])
def test_extra_attributes_are_allowed_outside_test_mode(
self, falsey_value: Optional[str]
):
with reload_prefect_base_model(falsey_value) as PrefectBaseModel:
class Model(PrefectBaseModel):
x: int
Model(x=1, y=2)
@pytest.mark.parametrize("truthy_value", ["1", "True", "true"])
def test_extra_attributes_are_allowed_with_truthy_test_mode(
self, truthy_value: Optional[str]
):
with reload_prefect_base_model(truthy_value) as PrefectBaseModel:
class Model(PrefectBaseModel):
x: int
Model(x=1, y=2)
| TestExtraForbidden |
python | python-markdown__markdown | markdown/inlinepatterns.py | {
"start": 37253,
"end": 38458
} | class ____(InlineProcessor):
"""
Return a `mailto` link Element given an auto-mail link (`<foo@example.com>`).
"""
def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element, int, int]:
""" Return an [`Element`][xml.etree.ElementTree.Element] containing a `mailto` link of `group(1)`. """
el = etree.Element('a')
email = self.unescape(m.group(1))
if email.startswith("mailto:"):
email = email[len("mailto:"):]
def codepoint2name(code: int) -> str:
"""Return entity definition by code, or the code if not defined."""
entity = entities.codepoint2name.get(code)
if entity:
return "{}{};".format(util.AMP_SUBSTITUTE, entity)
else:
return "%s#%d;" % (util.AMP_SUBSTITUTE, code)
letters = [codepoint2name(ord(letter)) for letter in email]
el.text = util.AtomicString(''.join(letters))
mailto = "mailto:" + email
mailto = "".join([util.AMP_SUBSTITUTE + '#%d;' %
ord(letter) for letter in mailto])
el.set('href', mailto)
return el, m.start(0), m.end(0)
| AutomailInlineProcessor |
python | sympy__sympy | sympy/assumptions/predicates/order.py | {
"start": 3668,
"end": 4258
} | class ____(Predicate):
"""
Zero number predicate.
Explanation
===========
``ask(Q.zero(x))`` is true iff the value of ``x`` is zero.
Examples
========
>>> from sympy import ask, Q, oo, symbols
>>> x, y = symbols('x, y')
>>> ask(Q.zero(0))
True
>>> ask(Q.zero(1/oo))
True
>>> print(ask(Q.zero(0*oo)))
None
>>> ask(Q.zero(1))
False
>>> ask(Q.zero(x*y), Q.zero(x) | Q.zero(y))
True
"""
name = 'zero'
handler = Dispatcher(
"ZeroHandler",
doc="Handler for key 'zero'."
)
| ZeroPredicate |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 97052,
"end": 97260
} | class ____:
xlPortugueseBoth = 3 # from enum XlPortugueseReform
xlPortuguesePostReform = 2 # from enum XlPortugueseReform
xlPortuguesePreReform = 1 # from enum XlPortugueseReform
| PortugueseReform |
python | FactoryBoy__factory_boy | factory/django.py | {
"start": 2237,
"end": 6786
} | class ____(base.Factory[T]):
"""Factory for Django models.
This makes sure that the 'sequence' field of created objects is a new id.
Possible improvement: define a new 'attribute' type, AutoField, which would
handle those for non-numerical primary keys.
"""
_options_class = DjangoOptions
_original_params = None
class Meta:
abstract = True # Optional, but explicit.
@classmethod
def _load_model_class(cls, definition):
if isinstance(definition, str) and '.' in definition:
app, model = definition.split('.', 1)
return get_model(app, model)
return definition
@classmethod
def _get_manager(cls, model_class):
if model_class is None:
raise errors.AssociatedClassError(
f"No model set on {cls.__module__}.{cls.__name__}.Meta")
try:
manager = model_class.objects
except AttributeError:
# When inheriting from an abstract model with a custom
# manager, the class has no 'objects' field.
manager = model_class._default_manager
if cls._meta.database != DEFAULT_DB_ALIAS:
manager = manager.using(cls._meta.database)
return manager
@classmethod
def _generate(cls, strategy, params):
# Original params are used in _get_or_create if it cannot build an
# object initially due to an IntegrityError being raised
cls._original_params = params
return super()._generate(strategy, params)
@classmethod
def _get_or_create(cls, model_class, *args, **kwargs):
"""Create an instance of the model through objects.get_or_create."""
manager = cls._get_manager(model_class)
assert 'defaults' not in cls._meta.django_get_or_create, (
"'defaults' is a reserved keyword for get_or_create "
"(in %s._meta.django_get_or_create=%r)"
% (cls, cls._meta.django_get_or_create))
key_fields = {}
for field in cls._meta.django_get_or_create:
if field not in kwargs:
raise errors.FactoryError(
"django_get_or_create - "
"Unable to find initialization value for '%s' in factory %s" %
(field, cls.__name__))
key_fields[field] = kwargs.pop(field)
key_fields['defaults'] = kwargs
try:
instance, _created = manager.get_or_create(*args, **key_fields)
except IntegrityError as e:
if cls._original_params is None:
raise e
get_or_create_params = {
lookup: value
for lookup, value in cls._original_params.items()
if lookup in cls._meta.django_get_or_create
}
if get_or_create_params:
try:
instance = manager.get(**get_or_create_params)
except manager.model.DoesNotExist:
# Original params are not a valid lookup and triggered a create(),
# that resulted in an IntegrityError. Follow Django’s behavior.
raise e
else:
raise e
return instance
@classmethod
def _create(cls, model_class, *args, **kwargs):
"""Create an instance of the model, and save it to the database."""
if cls._meta.django_get_or_create:
return cls._get_or_create(model_class, *args, **kwargs)
manager = cls._get_manager(model_class)
return manager.create(*args, **kwargs)
# DEPRECATED. Remove this override with the next major release.
@classmethod
def _after_postgeneration(cls, instance, create, results=None):
"""Save again the instance if creating and at least one hook ran."""
if create and results and not cls._meta.skip_postgeneration_save:
warnings.warn(
f"{cls.__name__}._after_postgeneration will stop saving the instance "
"after postgeneration hooks in the next major release.\n"
"If the save call is extraneous, set skip_postgeneration_save=True "
f"in the {cls.__name__}.Meta.\n"
"To keep saving the instance, move the save call to your "
"postgeneration hooks or override _after_postgeneration.",
DeprecationWarning,
)
# Some post-generation hooks ran, and may have modified us.
instance.save()
| DjangoModelFactory |
python | doocs__leetcode | solution/0800-0899/0839.Similar String Groups/Solution.py | {
"start": 563,
"end": 906
} | class ____:
def numSimilarGroups(self, strs: List[str]) -> int:
n, m = len(strs), len(strs[0])
uf = UnionFind(n)
for i, s in enumerate(strs):
for j, t in enumerate(strs[:i]):
if sum(s[k] != t[k] for k in range(m)) <= 2 and uf.union(i, j):
n -= 1
return n
| Solution |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/aio/_connection.py | {
"start": 5343,
"end": 6107
} | class ____(BaseConnectionInfo):
"""Base connection information for Azure Database for PostgreSQL connections.
:param host: Hostname of the Azure Database for PostgreSQL server.
:type host: str | None
:param dbname: Name of the database to connect to.
:type dbname: str
:param port: Port number for the connection.
:type port: int
:param credentials: Credentials for authentication.
:type credentials: BasicAuth | AsyncTokenCredential
:param sslmode: SSL mode for the connection.
:type sslmode: SSLMode
"""
model_config = ConfigDict(
arbitrary_types_allowed=True, # True to allow AsyncTokenCredential
)
credentials: BasicAuth | AsyncTokenCredential = DefaultAzureCredential()
| AsyncConnectionInfo |
python | redis__redis-py | redis/commands/search/index_definition.py | {
"start": 24,
"end": 131
} | class ____(Enum):
"""Enum of the currently supported index types."""
HASH = 1
JSON = 2
| IndexType |
python | coleifer__peewee | tests/keys.py | {
"start": 1535,
"end": 1581
} | class ____(TestModel):
tag = CharField()
| Tag |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.