language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
ray-project__ray
|
python/ray/data/tests/test_dynamic_block_split.py
|
{
"start": 3530,
"end": 13536
}
|
class ____(CSVDatasource):
def _read_stream(self, f: "pa.NativeFile", path: str):
for block in super()._read_stream(f, path):
time.sleep(3)
yield block
# Tests that we don't block on exponential rampup when doing bulk reads.
# https://github.com/ray-project/ray/issues/20625
@pytest.mark.parametrize("block_split", [False, True])
def test_bulk_lazy_eval_split_mode(shutdown_only, block_split, tmp_path):
# Defensively shutdown Ray for the first test here to make sure there
# is no existing Ray cluster.
ray.shutdown()
ray.init(num_cpus=8)
ctx = ray.data.context.DataContext.get_current()
ray.data.range(8, override_num_blocks=8).write_csv(str(tmp_path))
if not block_split:
# Setting a huge block size effectively disables block splitting.
ctx.target_max_block_size = 2**64
ds = ray.data.read_datasource(
SlowCSVDatasource(str(tmp_path)), override_num_blocks=8
)
start = time.time()
ds.map(lambda x: x)
delta = time.time() - start
print("full read time", delta)
# Should run in ~3 seconds. It takes >9 seconds if bulk read is broken.
assert delta < 8, delta
@pytest.mark.parametrize(
"compute",
[
"tasks",
"actors",
],
)
def test_dataset(
shutdown_only,
restore_data_context,
compute,
):
def identity_fn(x):
return x
def empty_fn(x):
return {}
class IdentityClass:
def __call__(self, x):
return x
class EmptyClass:
def __call__(self, x):
return {}
ctx = ray.data.DataContext.get_current()
# 1MiB.
ctx.target_max_block_size = 1024 * 1024
if compute == "tasks":
compute = ray.data._internal.compute.TaskPoolStrategy()
identity_func = identity_fn
empty_func = empty_fn
func_name = "identity_fn"
task_name = f"ReadRandomBytes->MapBatches({func_name})"
else:
compute = ray.data.ActorPoolStrategy()
identity_func = IdentityClass
empty_func = EmptyClass
func_name = "IdentityClass"
task_name = f"MapWorker(ReadRandomBytes->MapBatches({func_name})).submit"
ray.shutdown()
# We need at least 2 CPUs to run a actorpool streaming
ray.init(num_cpus=2, object_store_memory=1e9)
# Test 10 tasks, each task returning 10 blocks, each block has 1 row and each
# row has 1024 bytes.
num_blocks_per_task = 10
num_tasks = 10
@ray.remote
def warmup():
return np.zeros(ctx.target_max_block_size, dtype=np.uint8)
last_snapshot = get_initial_core_execution_metrics_snapshot()
ds = ray.data.read_datasource(
RandomBytesDatasource(
num_tasks=num_tasks,
num_batches_per_task=num_blocks_per_task,
row_size=ctx.target_max_block_size,
),
override_num_blocks=num_tasks,
)
# Note the following calls to ds will not fully execute it.
assert ds.schema() is not None
assert ds.count() == num_blocks_per_task * num_tasks
assert ds._plan.initial_num_blocks() == num_tasks
last_snapshot = assert_core_execution_metrics_equals(
CoreExecutionMetrics(
task_count={
"ReadRandomBytes": lambda count: count <= num_tasks,
},
object_store_stats={
"cumulative_created_plasma_bytes": lambda count: True,
"cumulative_created_plasma_objects": lambda count: True,
},
),
last_snapshot,
)
# Too-large blocks will get split to respect target max block size.
map_ds = ds.map_batches(identity_func, compute=compute)
map_ds = map_ds.materialize()
num_blocks_expected = num_tasks * num_blocks_per_task
assert map_ds._plan.initial_num_blocks() == num_blocks_expected
expected_actor_name = f"MapWorker(ReadRandomBytes->MapBatches({func_name}))"
assert_core_execution_metrics_equals(
CoreExecutionMetrics(
task_count={
f"{expected_actor_name}.__init__": lambda count: True,
f"{expected_actor_name}.get_location": lambda count: True,
task_name: num_tasks,
},
),
last_snapshot,
)
assert_blocks_expected_in_plasma(
last_snapshot,
num_blocks_expected,
block_size_expected=ctx.target_max_block_size,
)
# Blocks smaller than requested batch size will get coalesced.
map_ds = ds.map_batches(
empty_func,
batch_size=num_blocks_per_task * num_tasks,
compute=compute,
)
map_ds = map_ds.materialize()
assert map_ds._plan.initial_num_blocks() == 1
map_ds = ds.map(identity_func, compute=compute)
map_ds = map_ds.materialize()
assert map_ds._plan.initial_num_blocks() == num_blocks_per_task * num_tasks
ds_list = ds.split(5)
assert len(ds_list) == 5
for new_ds in ds_list:
assert new_ds._plan.initial_num_blocks() == num_blocks_per_task * num_tasks / 5
train, test = ds.train_test_split(test_size=0.25)
assert train._plan.initial_num_blocks() == num_blocks_per_task * num_tasks * 0.75
assert test._plan.initial_num_blocks() == num_blocks_per_task * num_tasks * 0.25
new_ds = ds.union(ds, ds)
assert new_ds._plan.initial_num_blocks() == num_tasks * 3
new_ds = new_ds.materialize()
assert new_ds._plan.initial_num_blocks() == num_blocks_per_task * num_tasks * 3
new_ds = ds.random_shuffle()
assert new_ds._plan.initial_num_blocks() == num_tasks
new_ds = ds.randomize_block_order()
assert new_ds._plan.initial_num_blocks() == num_tasks
assert ds.groupby("one").count().count() == num_blocks_per_task * num_tasks
new_ds = ds.zip(ds)
new_ds = new_ds.materialize()
assert new_ds._plan.initial_num_blocks() == num_blocks_per_task * num_tasks
assert len(ds.take(5)) == 5
assert len(ds.take_all()) == num_blocks_per_task * num_tasks
for batch in ds.iter_batches(batch_size=10):
assert len(batch["one"]) == 10
def test_filter(ray_start_regular_shared, target_max_block_size):
# Test 10 tasks, each task returning 10 blocks, each block has 1 row and each
# row has 1024 bytes.
num_blocks_per_task = 10
block_size = 1024
ds = ray.data.read_datasource(
RandomBytesDatasource(
num_tasks=1,
num_batches_per_task=num_blocks_per_task,
row_size=block_size,
),
override_num_blocks=1,
)
ds = ds.filter(lambda _: True)
ds = ds.materialize()
assert ds.count() == num_blocks_per_task
assert ds._plan.initial_num_blocks() == num_blocks_per_task
ds = ds.filter(lambda _: False)
ds = ds.materialize()
assert ds.count() == 0
assert ds._plan.initial_num_blocks() == num_blocks_per_task
@pytest.mark.skip("Needs zero-copy optimization for read->map_batches.")
def test_read_large_data(ray_start_cluster):
# Test 20G input with single task
num_blocks_per_task = 20
block_size = 1024 * 1024 * 1024
cluster = ray_start_cluster
cluster.add_node(num_cpus=1)
ray.init(cluster.address)
def foo(batch):
return pd.DataFrame({"one": [1]})
ds = ray.data.read_datasource(
RandomBytesDatasource(
num_tasks=1,
num_batches_per_task=num_blocks_per_task,
row_size=block_size,
),
override_num_blocks=1,
)
ds = ds.map_batches(foo, num_rows_per_batch=None)
assert ds.count() == num_blocks_per_task
def _test_write_large_data(
tmp_path, ext, write_fn, read_fn, use_bytes, write_kwargs=None
):
# Test 2G input with single task
num_blocks_per_task = 200
block_size = 10 * 1024 * 1024
ds = ray.data.read_datasource(
RandomBytesDatasource(
num_tasks=1,
num_batches_per_task=num_blocks_per_task,
row_size=block_size,
use_bytes=use_bytes,
),
override_num_blocks=1,
)
# This should succeed without OOM.
# https://github.com/ray-project/ray/pull/37966.
out_dir = os.path.join(tmp_path, ext)
write_kwargs = {} if write_kwargs is None else write_kwargs
write_fn(ds, out_dir, **write_kwargs)
max_heap_memory = ds._write_ds._get_stats_summary().get_max_heap_memory()
assert max_heap_memory < (num_blocks_per_task * block_size / 2), (
max_heap_memory,
ext,
)
# Make sure we can read out a record.
if read_fn is not None:
assert read_fn(out_dir).count() == num_blocks_per_task
def test_write_large_data_parquet(shutdown_only, tmp_path):
_test_write_large_data(
tmp_path,
"parquet",
Dataset.write_parquet,
ray.data.read_parquet,
use_bytes=True,
)
def test_write_large_data_json(shutdown_only, tmp_path):
_test_write_large_data(
tmp_path, "json", Dataset.write_json, ray.data.read_json, use_bytes=False
)
def test_write_large_data_numpy(shutdown_only, tmp_path):
_test_write_large_data(
tmp_path,
"numpy",
Dataset.write_numpy,
ray.data.read_numpy,
use_bytes=False,
write_kwargs={"column": "one"},
)
def test_write_large_data_csv(shutdown_only, tmp_path):
_test_write_large_data(
tmp_path, "csv", Dataset.write_csv, ray.data.read_csv, use_bytes=False
)
@pytest.mark.skipif(
sys.version_info >= (3, 12),
reason="Skip due to incompatibility tensorflow with Python 3.12+",
)
def test_write_large_data_tfrecords(shutdown_only, tmp_path):
_test_write_large_data(
tmp_path,
"tfrecords",
Dataset.write_tfrecords,
ray.data.read_tfrecords,
use_bytes=True,
)
def test_write_large_data_webdataset(shutdown_only, tmp_path):
_test_write_large_data(
tmp_path,
"webdataset",
Dataset.write_webdataset,
ray.data.read_webdataset,
use_bytes=True,
)
@dataclass
|
SlowCSVDatasource
|
python
|
Pylons__pyramid
|
tests/test_config/test_actions.py
|
{
"start": 32447,
"end": 32635
}
|
class ____:
autocommit = False
info = ''
def __init__(self):
self.actions = []
def action(self, *arg, **kw):
self.actions.append((arg, kw))
|
DummyActionState
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/dlp.py
|
{
"start": 33064,
"end": 36772
}
|
class ____(GoogleCloudBaseOperator):
"""
Deletes a DeidentifyTemplate.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPDeleteDeidentifyTemplateOperator`
:param template_id: The ID of deidentify template to be deleted.
:param organization_id: (Optional) The organization ID. Required to set this
field if parent resource is an organization.
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. Only set this field if the parent resource is
a project instead of an organization.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"template_id",
"organization_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPDeidentifyTemplatesListLink(),)
def __init__(
self,
*,
template_id: str,
organization_id: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.template_id = template_id
self.organization_id = organization_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
hook.delete_deidentify_template(
template_id=self.template_id,
organization_id=self.organization_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPDeidentifyTemplatesListLink.persist(
context=context,
project_id=project_id,
)
except NotFound:
self.log.error("Template %s not found.", self.template_id)
|
CloudDLPDeleteDeidentifyTemplateOperator
|
python
|
pydata__xarray
|
xarray/testing/strategies.py
|
{
"start": 804,
"end": 18050
}
|
class ____(Protocol[T_DuckArray]):
def __call__(
self,
*,
shape: "_ShapeLike",
dtype: "_DTypeLikeNested",
) -> st.SearchStrategy[T_DuckArray]: ...
def supported_dtypes() -> st.SearchStrategy[np.dtype]:
"""
Generates only those numpy dtypes which xarray can handle.
Use instead of hypothesis.extra.numpy.scalar_dtypes in order to exclude weirder dtypes such as unicode, byte_string, array, or nested dtypes.
Also excludes datetimes, which dodges bugs with pandas non-nanosecond datetime overflows. Checks only native endianness.
Requires the hypothesis package to be installed.
See Also
--------
:ref:`testing.hypothesis`_
"""
# TODO should this be exposed publicly?
# We should at least decide what the set of numpy dtypes that xarray officially supports is.
return (
npst.integer_dtypes(endianness="=")
| npst.unsigned_integer_dtypes(endianness="=")
| npst.floating_dtypes(endianness="=")
| npst.complex_number_dtypes(endianness="=")
# | npst.datetime64_dtypes()
# | npst.timedelta64_dtypes()
# | npst.unicode_string_dtypes()
)
def pandas_index_dtypes() -> st.SearchStrategy[np.dtype]:
"""
Dtypes supported by pandas indexes.
Restrict datetime64 and timedelta64 to ns frequency till Xarray relaxes that.
"""
return (
npst.integer_dtypes(endianness="=", sizes=(32, 64))
| npst.unsigned_integer_dtypes(endianness="=", sizes=(32, 64))
| npst.floating_dtypes(endianness="=", sizes=(32, 64))
# TODO: unset max_period
| npst.datetime64_dtypes(endianness="=", max_period="ns")
# TODO: set max_period="D"
| npst.timedelta64_dtypes(endianness="=", max_period="ns")
| npst.unicode_string_dtypes(endianness="=")
)
def datetimes() -> st.SearchStrategy:
"""
Generates datetime objects including both standard library datetimes and cftime datetimes.
Returns standard library datetime.datetime objects, and if cftime is available,
also includes cftime datetime objects from various calendars.
Requires the hypothesis package to be installed.
See Also
--------
:ref:`testing.hypothesis`_
"""
strategy = st.datetimes()
if module_available("cftime"):
strategy = strategy | cftime_datetimes()
return strategy
# TODO Generalize to all valid unicode characters once formatting bugs in xarray's reprs are fixed + docs can handle it.
_readable_characters = st.characters(
categories=["L", "N"], max_codepoint=0x017F
) # only use characters within the "Latin Extended-A" subset of unicode
def names() -> st.SearchStrategy[str]:
"""
Generates arbitrary string names for dimensions / variables.
Requires the hypothesis package to be installed.
See Also
--------
:ref:`testing.hypothesis`_
"""
return st.text(
_readable_characters,
min_size=1,
max_size=5,
)
def dimension_names(
*,
name_strategy=None,
min_dims: int = 0,
max_dims: int = 3,
) -> st.SearchStrategy[list[Hashable]]:
"""
Generates an arbitrary list of valid dimension names.
Requires the hypothesis package to be installed.
Parameters
----------
name_strategy
Strategy for making names. Useful if we need to share this.
min_dims
Minimum number of dimensions in generated list.
max_dims
Maximum number of dimensions in generated list.
"""
if name_strategy is None:
name_strategy = names()
return st.lists(
elements=name_strategy,
min_size=min_dims,
max_size=max_dims,
unique=True,
)
def dimension_sizes(
*,
dim_names: st.SearchStrategy[Hashable] = names(), # noqa: B008
min_dims: int = 0,
max_dims: int = 3,
min_side: int = 1,
max_side: int | None = None,
) -> st.SearchStrategy[Mapping[Hashable, int]]:
"""
Generates an arbitrary mapping from dimension names to lengths.
Requires the hypothesis package to be installed.
Parameters
----------
dim_names: strategy generating strings, optional
Strategy for generating dimension names.
Defaults to the `names` strategy.
min_dims: int, optional
Minimum number of dimensions in generated list.
Default is 1.
max_dims: int, optional
Maximum number of dimensions in generated list.
Default is 3.
min_side: int, optional
Minimum size of a dimension.
Default is 1.
max_side: int, optional
Minimum size of a dimension.
Default is `min_length` + 5.
See Also
--------
:ref:`testing.hypothesis`_
"""
if max_side is None:
max_side = min_side + 3
return st.dictionaries(
keys=dim_names,
values=st.integers(min_value=min_side, max_value=max_side),
min_size=min_dims,
max_size=max_dims,
)
_readable_strings = st.text(
_readable_characters,
max_size=5,
)
_attr_keys = _readable_strings
_small_arrays = npst.arrays(
shape=npst.array_shapes(
max_side=2,
max_dims=2,
),
dtype=npst.scalar_dtypes()
| npst.byte_string_dtypes()
| npst.unicode_string_dtypes(),
)
_attr_values = st.none() | st.booleans() | _readable_strings | _small_arrays
simple_attrs = st.dictionaries(_attr_keys, _attr_values)
def attrs() -> st.SearchStrategy[Mapping[Hashable, Any]]:
"""
Generates arbitrary valid attributes dictionaries for xarray objects.
The generated dictionaries can potentially be recursive.
Requires the hypothesis package to be installed.
See Also
--------
:ref:`testing.hypothesis`_
"""
return st.recursive(
st.dictionaries(_attr_keys, _attr_values),
lambda children: st.dictionaries(_attr_keys, children),
max_leaves=3,
)
ATTRS = attrs()
@st.composite
def variables(
draw: st.DrawFn,
*,
array_strategy_fn: ArrayStrategyFn | None = None,
dims: st.SearchStrategy[Sequence[Hashable] | Mapping[Hashable, int]] | None = None,
dtype: st.SearchStrategy[np.dtype] | None = None,
attrs: st.SearchStrategy[Mapping] = ATTRS,
) -> xr.Variable:
"""
Generates arbitrary xarray.Variable objects.
Follows the basic signature of the xarray.Variable constructor, but allows passing alternative strategies to
generate either numpy-like array data or dimensions. Also allows specifying the shape or dtype of the wrapped array
up front.
Passing nothing will generate a completely arbitrary Variable (containing a numpy array).
Requires the hypothesis package to be installed.
Parameters
----------
array_strategy_fn: Callable which returns a strategy generating array-likes, optional
Callable must only accept shape and dtype kwargs, and must generate results consistent with its input.
If not passed the default is to generate a small numpy array with one of the supported_dtypes.
dims: Strategy for generating the dimensions, optional
Can either be a strategy for generating a sequence of string dimension names,
or a strategy for generating a mapping of string dimension names to integer lengths along each dimension.
If provided as a mapping the array shape will be passed to array_strategy_fn.
Default is to generate arbitrary dimension names for each axis in data.
dtype: Strategy which generates np.dtype objects, optional
Will be passed in to array_strategy_fn.
Default is to generate any scalar dtype using supported_dtypes.
Be aware that this default set of dtypes includes some not strictly allowed by the array API standard.
attrs: Strategy which generates dicts, optional
Default is to generate a nested attributes dictionary containing arbitrary strings, booleans, integers, Nones,
and numpy arrays.
Returns
-------
variable_strategy
Strategy for generating xarray.Variable objects.
Raises
------
ValueError
If a custom array_strategy_fn returns a strategy which generates an example array inconsistent with the shape
& dtype input passed to it.
Examples
--------
Generate completely arbitrary Variable objects backed by a numpy array:
>>> variables().example() # doctest: +SKIP
<xarray.Variable (żō: 3)>
array([43506, -16, -151], dtype=int32)
>>> variables().example() # doctest: +SKIP
<xarray.Variable (eD: 4, ğŻżÂĕ: 2, T: 2)>
array([[[-10000000., -10000000.],
[-10000000., -10000000.]],
[[-10000000., -10000000.],
[ 0., -10000000.]],
[[ 0., -10000000.],
[-10000000., inf]],
[[ -0., -10000000.],
[-10000000., -0.]]], dtype=float32)
Attributes:
śřĴ: {'ĉ': {'iĥf': array([-30117, -1740], dtype=int16)}}
Generate only Variable objects with certain dimension names:
>>> variables(dims=st.just(["a", "b"])).example() # doctest: +SKIP
<xarray.Variable (a: 5, b: 3)>
array([[ 248, 4294967295, 4294967295],
[2412855555, 3514117556, 4294967295],
[ 111, 4294967295, 4294967295],
[4294967295, 1084434988, 51688],
[ 47714, 252, 11207]], dtype=uint32)
Generate only Variable objects with certain dimension names and lengths:
>>> variables(dims=st.just({"a": 2, "b": 1})).example() # doctest: +SKIP
<xarray.Variable (a: 2, b: 1)>
array([[-1.00000000e+007+3.40282347e+038j],
[-2.75034266e-225+2.22507386e-311j]])
See Also
--------
:ref:`testing.hypothesis`_
"""
if dtype is None:
dtype = supported_dtypes()
if not isinstance(dims, st.SearchStrategy) and dims is not None:
raise InvalidArgument(
f"dims must be provided as a hypothesis.strategies.SearchStrategy object (or None), but got type {type(dims)}. "
"To specify fixed contents, use hypothesis.strategies.just()."
)
if not isinstance(dtype, st.SearchStrategy) and dtype is not None:
raise InvalidArgument(
f"dtype must be provided as a hypothesis.strategies.SearchStrategy object (or None), but got type {type(dtype)}. "
"To specify fixed contents, use hypothesis.strategies.just()."
)
if not isinstance(attrs, st.SearchStrategy) and attrs is not None:
raise InvalidArgument(
f"attrs must be provided as a hypothesis.strategies.SearchStrategy object (or None), but got type {type(attrs)}. "
"To specify fixed contents, use hypothesis.strategies.just()."
)
_array_strategy_fn: ArrayStrategyFn
if array_strategy_fn is None:
# For some reason if I move the default value to the function signature definition mypy incorrectly says the ignore is no longer necessary, making it impossible to satisfy mypy
_array_strategy_fn = npst.arrays # type: ignore[assignment] # npst.arrays has extra kwargs that we aren't using later
elif not callable(array_strategy_fn):
raise InvalidArgument(
"array_strategy_fn must be a Callable that accepts the kwargs dtype and shape and returns a hypothesis "
"strategy which generates corresponding array-like objects."
)
else:
_array_strategy_fn = (
array_strategy_fn # satisfy mypy that this new variable cannot be None
)
_dtype = draw(dtype)
if dims is not None:
# generate dims first then draw data to match
_dims = draw(dims)
if isinstance(_dims, Sequence):
dim_names = list(_dims)
valid_shapes = npst.array_shapes(min_dims=len(_dims), max_dims=len(_dims))
_shape = draw(valid_shapes)
array_strategy = _array_strategy_fn(shape=_shape, dtype=_dtype)
elif isinstance(_dims, Mapping | dict):
# should be a mapping of form {dim_names: lengths}
dim_names, _shape = list(_dims.keys()), tuple(_dims.values())
array_strategy = _array_strategy_fn(shape=_shape, dtype=_dtype)
else:
raise InvalidArgument(
f"Invalid type returned by dims strategy - drew an object of type {type(dims)}"
)
else:
# nothing provided, so generate everything consistently
# We still generate the shape first here just so that we always pass shape to array_strategy_fn
_shape = draw(npst.array_shapes())
array_strategy = _array_strategy_fn(shape=_shape, dtype=_dtype)
dim_names = draw(dimension_names(min_dims=len(_shape), max_dims=len(_shape)))
_data = draw(array_strategy)
if _data.shape != _shape:
raise ValueError(
"array_strategy_fn returned an array object with a different shape than it was passed."
f"Passed {_shape}, but returned {_data.shape}."
"Please either specify a consistent shape via the dims kwarg or ensure the array_strategy_fn callable "
"obeys the shape argument passed to it."
)
if _data.dtype != _dtype:
raise ValueError(
"array_strategy_fn returned an array object with a different dtype than it was passed."
f"Passed {_dtype}, but returned {_data.dtype}"
"Please either specify a consistent dtype via the dtype kwarg or ensure the array_strategy_fn callable "
"obeys the dtype argument passed to it."
)
return xr.Variable(dims=dim_names, data=_data, attrs=draw(attrs))
@overload
def unique_subset_of(
objs: Sequence[Hashable],
*,
min_size: int = 0,
max_size: int | None = None,
) -> st.SearchStrategy[Sequence[Hashable]]: ...
@overload
def unique_subset_of(
objs: Mapping[Hashable, Any],
*,
min_size: int = 0,
max_size: int | None = None,
) -> st.SearchStrategy[Mapping[Hashable, Any]]: ...
@st.composite
def unique_subset_of(
draw: st.DrawFn,
objs: Sequence[Hashable] | Mapping[Hashable, Any],
*,
min_size: int = 0,
max_size: int | None = None,
) -> Sequence[Hashable] | Mapping[Hashable, Any]:
"""
Return a strategy which generates a unique subset of the given objects.
Each entry in the output subset will be unique (if input was a sequence) or have a unique key (if it was a mapping).
Requires the hypothesis package to be installed.
Parameters
----------
objs: Union[Sequence[Hashable], Mapping[Hashable, Any]]
Objects from which to sample to produce the subset.
min_size: int, optional
Minimum size of the returned subset. Default is 0.
max_size: int, optional
Maximum size of the returned subset. Default is the full length of the input.
If set to 0 the result will be an empty mapping.
Returns
-------
unique_subset_strategy
Strategy generating subset of the input.
Examples
--------
>>> unique_subset_of({"x": 2, "y": 3}).example() # doctest: +SKIP
{'y': 3}
>>> unique_subset_of(["x", "y"]).example() # doctest: +SKIP
['x']
See Also
--------
:ref:`testing.hypothesis`_
"""
if not isinstance(objs, Iterable):
raise TypeError(
f"Object to sample from must be an Iterable or a Mapping, but received type {type(objs)}"
)
if len(objs) == 0:
raise ValueError("Can't sample from a length-zero object.")
keys = list(objs.keys()) if isinstance(objs, Mapping) else objs
subset_keys = draw(
st.lists(
st.sampled_from(keys),
unique=True,
min_size=min_size,
max_size=max_size,
)
)
return (
{k: objs[k] for k in subset_keys} if isinstance(objs, Mapping) else subset_keys
)
@st.composite
def cftime_datetimes(draw: st.DrawFn):
"""
Generates cftime datetime objects across various calendars.
This strategy generates cftime datetime objects from all available
cftime calendars with dates ranging from year -99999 to 99999.
Requires both the hypothesis and cftime packages to be installed.
Returns
-------
cftime_datetime_strategy
Strategy for generating cftime datetime objects.
See Also
--------
:ref:`testing.hypothesis`_
"""
from xarray.tests import _all_cftime_date_types
date_types = _all_cftime_date_types()
calendars = list(date_types)
calendar = draw(st.sampled_from(calendars))
date_type = date_types[calendar]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=".*date/calendar/year zero.*")
daysinmonth = date_type(99999, 12, 1).daysinmonth
min_value = date_type(-99999, 1, 1)
max_value = date_type(99999, 12, daysinmonth, 23, 59, 59, 999999)
unit_microsecond = datetime.timedelta(microseconds=1)
timespan_microseconds = (max_value - min_value) // unit_microsecond
microseconds_offset = draw(st.integers(0, timespan_microseconds))
return min_value + datetime.timedelta(microseconds=microseconds_offset)
|
ArrayStrategyFn
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/reddit/provider.py
|
{
"start": 262,
"end": 688
}
|
class ____(OAuth2Provider):
id = "reddit"
name = "Reddit"
account_class = RedditAccount
oauth2_adapter_class = RedditAdapter
def extract_uid(self, data):
return data["name"]
def extract_common_fields(self, data):
return dict(username=data.get("name"))
def get_default_scope(self):
scope = ["identity"]
return scope
provider_classes = [RedditProvider]
|
RedditProvider
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/exc.py
|
{
"start": 10132,
"end": 10302
}
|
class ____(InvalidRequestError):
"""A transaction has failed and needs to be rolled back before
continuing.
.. versionadded:: 1.4
"""
|
PendingRollbackError
|
python
|
walkccc__LeetCode
|
solutions/1453. Maximum Number of Darts Inside of a Circular Dartboard/1453.py
|
{
"start": 87,
"end": 1001
}
|
class ____:
def numPoints(self, darts: list[list[int]], r: int) -> int:
ERR = 1e-6
ans = 1
points = [Point(x, y) for x, y in darts]
def dist(p: Point, q: Point) -> float:
return ((p.x - q.x)**2 + (p.y - q.y)**2)**0.5
def getCircles(p: Point, q: Point) -> list[Point]:
if dist(p, q) - 2.0 * r > ERR:
return []
m = Point((p.x + q.x) / 2, (p.y + q.y) / 2)
distCM = (r**2 - (dist(p, q) / 2)**2)**0.5
alpha = math.atan2(p.y - q.y, q.x - p.x)
return [Point(m.x - distCM * math.sin(alpha), m.y - distCM * math.cos(alpha)),
Point(m.x + distCM * math.sin(alpha), m.y + distCM * math.cos(alpha))]
for i, j in itertools.combinations(points, 2):
for c in getCircles(i, j):
count = 0
for point in points:
if dist(c, point) - r <= ERR:
count += 1
ans = max(ans, count)
return ans
|
Solution
|
python
|
ray-project__ray
|
doc/source/ray-core/doc_code/streaming_generator.py
|
{
"start": 1068,
"end": 1188
}
|
class ____:
async def f(self):
for i in range(5):
yield i
@ray.remote(max_concurrency=5)
|
AsyncActor
|
python
|
getsentry__sentry
|
tests/sentry/integrations/vsts/test_client.py
|
{
"start": 1184,
"end": 17932
}
|
class ____(VstsIntegrationTestCase):
@pytest.fixture(autouse=True)
def _setup_metric_patch(self):
with mock.patch("sentry.shared_integrations.client.base.metrics") as self.metrics:
yield
def test_refreshes_expired_token(self) -> None:
self.assert_installation()
integration, installation = self._get_integration_and_install()
# Make the Identity have an expired token
idp = IdentityProvider.objects.get(external_id=self.vsts_account_id)
identity = Identity.objects.get(idp_id=idp.id)
identity.data["expires"] = int(time()) - int(123456789)
identity.save()
# New values VSTS will return on refresh
self.access_token = "new-access-token"
self.refresh_token = "new-refresh-token"
self._stub_vsts()
# Make a request with expired token
installation.get_client().get_projects()
# Second to last request, before the Projects request, was to refresh
# the Access Token.
assert responses.calls[-2].request.url == "https://app.vssps.visualstudio.com/oauth2/token"
# Then we request the Projects with the new token
assert (
responses.calls[-1].request.url.split("?")[0]
== f"{self.vsts_base_url.lower()}_apis/projects"
)
identity = Identity.objects.get(id=identity.id)
assert identity.scopes == [
"vso.code",
"vso.graph",
"vso.serviceendpoint_manage",
"vso.work_write",
]
assert identity.data["access_token"] == "new-access-token"
assert identity.data["refresh_token"] == "new-refresh-token"
assert identity.data["expires"] > int(time())
@with_feature("organizations:migrate-azure-devops-integration")
def test_refreshes_expired_token_new_integration(self) -> None:
self.assert_installation(new=True)
integration, installation = self._get_integration_and_install()
# Make the Identity have an expired token
idp = IdentityProvider.objects.get(external_id=self.vsts_account_id)
identity = Identity.objects.get(idp_id=idp.id)
identity.data["expires"] = int(time()) - int(123456789)
identity.save()
# New values VSTS will return on refresh
self.access_token = "new-access-token"
self.refresh_token = "new-refresh-token"
self._stub_vsts()
# Make a request with expired token
installation.get_client().get_projects()
# Second to last request, before the Projects request, was to refresh
# the Access Token.
assert (
responses.calls[-2].request.url
== "https://login.microsoftonline.com/common/oauth2/v2.0/token"
)
# Then we request the Projects with the new token
assert (
responses.calls[-1].request.url.split("?")[0]
== f"{self.vsts_base_url.lower()}_apis/projects"
)
identity = Identity.objects.get(id=identity.id)
assert set(identity.scopes) == set(VstsIntegrationProvider.NEW_SCOPES)
assert identity.data["access_token"] == "new-access-token"
assert identity.data["refresh_token"] == "new-refresh-token"
assert identity.data["expires"] > int(time())
@responses.activate
def test_does_not_refresh_valid_tokens(self) -> None:
    """A non-expired token is used as-is: no refresh request is issued."""
    self.assert_installation()
    responses.reset()
    integration, installation = self._get_integration_and_install()
    # Make the Identity have a non-expired token
    idp = IdentityProvider.objects.get(external_id=self.vsts_account_id)
    identity = Identity.objects.get(idp_id=idp.id)
    expires = int(time()) + int(123456789)
    identity.data["expires"] = expires
    access_token = identity.data["access_token"]
    refresh_token = identity.data["refresh_token"]
    identity.save()
    # New values VSTS will return on refresh
    self.access_token = "new-access-token"
    self.refresh_token = "new-refresh-token"
    self._stub_vsts()
    # Make a request
    installation.get_client().get_projects()
    # Exactly one HTTP call: the Projects request itself, no token refresh.
    assert len(responses.calls) == 1
    assert (
        responses.calls[0].request.url
        == "https://myvstsaccount.visualstudio.com/_apis/projects?stateFilter=WellFormed&%24skip=0&%24top=100"
    )
    # Stored credentials are untouched.
    assert identity.data["access_token"] == access_token != self.access_token
    assert identity.data["refresh_token"] == refresh_token != self.refresh_token
    assert identity.data["expires"] == expires
def test_project_pagination(self) -> None:
    """get_projects pages through results using the $skip query parameter.

    The stubbed endpoint returns 100 projects per page until $skip >= 200,
    then a final page of 20, for 220 total.
    """

    def request_callback(request):
        query = parse_qs(request.url.split("?")[1])
        # allow for 220 responses
        if int(query["$skip"][0]) >= 200:
            projects = [self.project_a, self.project_b] * 10
        else:
            projects = [self.project_a, self.project_b] * 50
        resp_body = {"value": projects, "count": len(projects)}
        return 200, {}, orjson.dumps(resp_body).decode()

    self.assert_installation()
    responses.reset()
    integration, installation = self._get_integration_and_install()
    responses.add_callback(
        responses.GET,
        f"https://{self.vsts_account_name.lower()}.visualstudio.com/_apis/projects",
        callback=request_callback,
    )
    projects = installation.get_client().get_projects()
    assert len(projects) == 220
@with_feature("organizations:migrate-azure-devops-integration")
def test_metadata_is_correct(self) -> None:
    """New-style installations store domain, NEW_SCOPES and the migration version in metadata."""
    self.assert_installation(new=True)
    integration, installation = self._get_integration_and_install()
    assert integration.metadata["domain_name"] == "https://MyVSTSAccount.visualstudio.com/"
    assert set(integration.metadata["scopes"]) == set(VstsIntegrationProvider.NEW_SCOPES)
    assert (
        integration.metadata["integration_migration_version"]
        == VstsIntegrationProvider.CURRENT_MIGRATION_VERSION
    )
@responses.activate
def test_simple(self) -> None:
    """get_commits issues a single request and emits request/response metrics."""
    responses.add(
        responses.GET,
        "https://myvstsaccount.visualstudio.com/_apis/git/repositories/albertos-apples/commits",
        body=b"{}",
        match=[matchers.query_param_matcher({"commit": "b", "$top": "10"})],
    )
    self.assert_installation()
    integration, installation = self._get_integration_and_install()
    with assume_test_silo_mode(SiloMode.REGION):
        repo = Repository.objects.create(
            provider="visualstudio",
            name="example",
            organization_id=self.organization.id,
            config={
                "instance": self.vsts_base_url,
                "project": "project-name",
                "name": "example",
            },
            integration_id=integration.id,
            external_id="albertos-apples",
        )
    client = installation.get_client()

    responses.calls.reset()
    assert repo.external_id is not None
    client.get_commits(repo_id=repo.external_id, commit="b", limit=10)
    assert len(responses.calls) == 1

    # Check if metrics is generated properly
    # 5 pairs total: installation setup calls plus the get_commits call each
    # record one http_request/http_response pair.
    calls = [
        call("integrations.http_request", sample_rate=1.0, tags={"integration": "vsts"}),
        call(
            "integrations.http_response",
            sample_rate=1.0,
            tags={"integration": "vsts", "status": 200},
        ),
    ] * 5
    assert self.metrics.incr.mock_calls == calls
@responses.activate
def test_check_file(self) -> None:
    """check_file returns a successful response when the file exists at the given version."""
    self.assert_installation()
    integration, installation = self._get_integration_and_install()
    with assume_test_silo_mode(SiloMode.REGION):
        repo = Repository.objects.create(
            provider="visualstudio",
            name="example",
            organization_id=self.organization.id,
            config={
                "instance": self.vsts_base_url,
                "project": "project-name",
                "name": "example",
            },
            integration_id=integration.id,
            external_id="albertos-apples",
        )
    client = installation.get_client()

    path = "src/sentry/integrations/vsts/client.py"
    version = "master"
    url = f"https://myvstsaccount.visualstudio.com/project-name/_apis/git/repositories/{repo.name}/items?path={path}&api-version=7.0&versionDescriptor.version={version}"

    responses.add(
        method=responses.GET,
        url=url,
        json={"text": 200},
    )

    resp = client.check_file(repo, path, version)
    assert resp
    # Direct attribute access: the attribute name is a constant, so
    # getattr(resp, "status_code") was an unnecessary indirection.
    assert resp.status_code == 200
@responses.activate
@mock.patch(
    "sentry.integrations.vsts.client.VstsApiClient.check_file",
    side_effect=ApiUnauthorized(text="Unauthorized"),
)
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_check_file_unauthorized(
    self, mock_record_event: MagicMock, mock_check_file: MagicMock
) -> None:
    """An ApiUnauthorized from the client is recorded as a halt metric, not raised."""
    self.assert_installation()
    integration, installation = self._get_integration_and_install()
    with assume_test_silo_mode(SiloMode.REGION):
        repo = Repository.objects.create(
            provider="visualstudio",
            name="example",
            organization_id=self.organization.id,
            config={
                "instance": self.vsts_base_url,
                "project": "project-name",
                "name": "example",
            },
            integration_id=integration.id,
            external_id="albertos-apples",
        )

    path = "src/sentry/integrations/vsts/client.py"
    version = "master"
    installation.check_file(repo, path, version)
    assert_halt_metric(mock_record_event, ApiUnauthorized("Unauthorized"))
@responses.activate
def test_check_no_file(self) -> None:
    """check_file raises ApiError when the file request fails.

    NOTE(review): only a HEAD stub (404) is registered while the client
    presumably issues a GET — confirm whether the ApiError comes from the
    404 or from the unmatched request.
    """
    self.assert_installation()
    integration, installation = self._get_integration_and_install()
    with assume_test_silo_mode(SiloMode.REGION):
        repo = Repository.objects.create(
            provider="visualstudio",
            name="example",
            organization_id=self.organization.id,
            config={
                "instance": self.vsts_base_url,
                "project": "project-name",
                "name": "example",
            },
            integration_id=integration.id,
            external_id="albertos-apples",
        )
    client = installation.get_client()

    path = "src/sentry/integrations/vsts/client.py"
    version = "master"
    url = f"https://myvstsaccount.visualstudio.com/project-name/_apis/git/repositories/{repo.name}/items?path={path}&api-version=7.0&versionDescriptor.version={version}"

    responses.add(method=responses.HEAD, url=url, status=404)

    with pytest.raises(ApiError):
        client.check_file(repo, path, version)
@responses.activate
def test_get_file(self) -> None:
    """get_file downloads the raw file contents (download=true) at a given version."""
    self.assert_installation()
    integration, installation = self._get_integration_and_install()
    with assume_test_silo_mode(SiloMode.REGION):
        repo = Repository.objects.create(
            provider="visualstudio",
            name="example",
            organization_id=self.organization.id,
            config={
                "instance": self.vsts_base_url,
                "project": "project-name",
                "name": "example",
            },
            integration_id=integration.id,
            external_id="albertos-apples",
        )
    client = installation.get_client()

    path = "README.md"
    version = "master"
    url = f"https://myvstsaccount.visualstudio.com/project-name/_apis/git/repositories/{repo.name}/items?path={path}&api-version=7.0&versionDescriptor.version={version}&download=true"
    responses.add(method=responses.GET, url=url, body="Hello, world!")

    resp = client.get_file(repo, path, version)
    assert resp == "Hello, world!"
@responses.activate
def test_get_stacktrace_link(self) -> None:
    """get_stacktrace_link builds a web URL to the file when check_file succeeds."""
    self.assert_installation()
    integration, installation = self._get_integration_and_install()
    with assume_test_silo_mode(SiloMode.REGION):
        repo = Repository.objects.create(
            provider="visualstudio",
            name="example",
            organization_id=self.organization.id,
            config={
                "instance": self.vsts_base_url,
                "project": "project-name",
                "name": "example",
            },
            integration_id=integration.id,
            external_id="albertos-apples",
        )

    path = "/src/sentry/integrations/vsts/client.py"
    version = "master"
    # API existence check is stubbed; note the leading "/" is stripped for the
    # API path but kept (quoted) in the resulting web URL.
    url = f"https://myvstsaccount.visualstudio.com/project-name/_apis/git/repositories/{repo.name}/items?path={path.lstrip('/')}&api-version=7.0&versionDescriptor.version={version}"

    responses.add(
        method=responses.GET,
        url=url,
        json={"text": 200},
    )

    source_url = installation.get_stacktrace_link(repo, path, "master", version)
    assert (
        source_url
        == f"https://MyVSTSAccount.visualstudio.com/project-name/_git/{repo.name}?path={quote_plus(path)}&version=GB{version}"
    )
@responses.activate
@mock.patch(
    "sentry.integrations.vsts.client.VstsApiClient.check_file",
    side_effect=ApiError(
        text='{"$id":"1","innerException":null,"message":"According to Microsoft Entra, your Identity xxx is currently Disabled within the following Microsoft Entra tenant: xxx. Please contact your Microsoft Entra administrator to resolve this.","typeName":"Microsoft.TeamFoundation.Framework.Server.AadUserStateException, Microsoft.TeamFoundation.Framework.Server","typeKey":"AadUserStateException","errorCode":0,"eventId":3000}'
    ),
)
@mock.patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_get_stacktrace_link_identity_deleted(
    self, mock_record: MagicMock, mock_check_file: MagicMock
) -> None:
    """A disabled Entra identity (AadUserStateException) halts the lifecycle and returns None."""
    self.assert_installation()
    integration, installation = self._get_integration_and_install()
    with assume_test_silo_mode(SiloMode.REGION):
        repo = Repository.objects.create(
            provider="visualstudio",
            name="example",
            organization_id=self.organization.id,
            config={
                "instance": self.vsts_base_url,
                "project": "project-name",
                "name": "example",
            },
            integration_id=integration.id,
            external_id="albertos-apples",
        )

    path = "/src/sentry/integrations/vsts/client.py"
    version = "master"
    url = f"https://myvstsaccount.visualstudio.com/project-name/_apis/git/repositories/{repo.name}/items?path={path.lstrip('/')}&api-version=7.0&versionDescriptor.version={version}"

    responses.add(
        method=responses.GET,
        url=url,
        json={"text": 200},
    )

    source_url = installation.get_stacktrace_link(repo, path, "master", version)
    assert source_url is None
    # Second-to-last lifecycle event is the HALTED outcome carrying the API error.
    halt = mock_record.mock_calls[-2]
    assert halt.args[0] == EventLifecycleOutcome.HALTED
    assert (
        halt.args[1].text
        == '{"$id":"1","innerException":null,"message":"According to Microsoft Entra, your Identity xxx is currently Disabled within the following Microsoft Entra tenant: xxx. Please contact your Microsoft Entra administrator to resolve this.","typeName":"Microsoft.TeamFoundation.Framework.Server.AadUserStateException, Microsoft.TeamFoundation.Framework.Server","typeKey":"AadUserStateException","errorCode":0,"eventId":3000}'
    )
def assert_proxy_request(request, is_proxy=True):
    """Assert that *request* does (or does not) carry the proxy markers.

    A proxied request has the proxy base path in its URL, the proxy OI and
    signature headers present, and no Authorization header; a direct request
    is the exact opposite.
    """
    url_has_proxy_path = PROXY_BASE_PATH in request.url
    has_oi_header = PROXY_OI_HEADER in request.headers
    has_signature_header = PROXY_SIGNATURE_HEADER in request.headers
    has_authorization = "Authorization" in request.headers

    assert url_has_proxy_path == is_proxy
    assert has_oi_header == is_proxy
    assert has_signature_header == is_proxy
    # Authorization is mutually exclusive with proxying.
    assert has_authorization != is_proxy
    if is_proxy:
        assert request.headers[PROXY_OI_HEADER] is not None
|
VstsApiClientTest
|
python
|
apache__airflow
|
providers/git/tests/unit/git/bundles/test_git.py
|
{
"start": 2705,
"end": 33536
}
|
class ____:
@classmethod
def teardown_class(cls) -> None:
    # Intentionally a no-op: nothing class-scoped needs cleanup.
    return
# TODO: Potential performance issue, converted setup_class to a setup_connections function level fixture
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db, request):
    """Register the git connections most tests rely on.

    Skipped for tests that create their own connections (view_url tests),
    since those register a conflicting ``git_default``/custom connection.
    """
    # Skip setup for tests that need to create their own connections
    if request.function.__name__ in ["test_view_url", "test_view_url_subdir"]:
        return
    create_connection_without_db(
        Connection(
            conn_id="git_default",
            host="git@github.com:apache/airflow.git",
            conn_type="git",
        )
    )
    create_connection_without_db(
        Connection(
            conn_id=CONN_HTTPS,
            host=AIRFLOW_HTTPS_URL,
            password=ACCESS_TOKEN,
            conn_type="git",
        )
    )
    create_connection_without_db(
        Connection(
            conn_id=CONN_NO_REPO_URL,
            conn_type="git",
        )
    )
def test_supports_versioning(self):
    """GitDagBundle advertises version support."""
    assert GitDagBundle.supports_versioning is True
def test_uses_dag_bundle_root_storage_path(self):
    """The bundle's working path lives under the configured bundle storage root."""
    bundle = GitDagBundle(name="test", tracking_ref=GIT_DEFAULT_BRANCH)
    base = get_bundle_storage_root_path()
    assert bundle.path.is_relative_to(base)
def test_repo_url_overrides_connection_host_when_provided(self):
    """An explicit repo_url argument wins over the connection's host."""
    bundle = GitDagBundle(name="test", tracking_ref=GIT_DEFAULT_BRANCH, repo_url="/some/other/repo")
    assert bundle.repo_url == "/some/other/repo"
def test_https_access_token_repo_url_overrides_connection_host_when_provided(self):
    """An explicit HTTPS repo_url is rewritten to embed the connection's access token."""
    bundle = GitDagBundle(
        name="test",
        git_conn_id=CONN_HTTPS,
        tracking_ref=GIT_DEFAULT_BRANCH,
        repo_url="https://github.com/apache/zzzairflow",
    )
    assert bundle.repo_url == f"https://user:{ACCESS_TOKEN}@github.com/apache/zzzairflow"
def test_falls_back_to_connection_host_when_no_repo_url_provided(self):
    """Without an explicit repo_url, the hook's URL (from the connection) is used."""
    bundle = GitDagBundle(name="test", git_conn_id=CONN_HTTPS, tracking_ref=GIT_DEFAULT_BRANCH)
    assert bundle.repo_url == bundle.hook.repo_url
@mock.patch("airflow.providers.git.bundles.git.GitHook")
def test_get_current_version(self, mock_githook, git_repo):
    """After initialize, get_current_version matches the repo's HEAD commit."""
    repo_path, repo = git_repo
    mock_githook.return_value.repo_url = repo_path
    bundle = GitDagBundle(name="test", git_conn_id=CONN_HTTPS, tracking_ref=GIT_DEFAULT_BRANCH)
    bundle.initialize()
    assert bundle.get_current_version() == repo.head.commit.hexsha
    assert_repo_is_closed(bundle)
@mock.patch("airflow.providers.git.bundles.git.GitHook")
def test_get_specific_version(self, mock_githook, git_repo):
    """Pinning version to an earlier commit checks out that commit, not HEAD.

    A second commit adds ``new_test.py``; the pinned bundle must only contain
    the files from the starting commit.
    """
    repo_path, repo = git_repo
    mock_githook.return_value.repo_url = repo_path
    starting_commit = repo.head.commit

    # Add new file to the repo on top of the starting commit.
    # pathlib idiom: write_text replaces the manual open/write pair.
    file_path = repo_path / "new_test.py"
    file_path.write_text("hello world")
    repo.index.add([file_path])
    repo.index.commit("Another commit")

    bundle = GitDagBundle(
        name="test",
        git_conn_id=CONN_HTTPS,
        version=starting_commit.hexsha,
        tracking_ref=GIT_DEFAULT_BRANCH,
        prune_dotgit_folder=False,
    )
    bundle.initialize()

    assert bundle.get_current_version() == starting_commit.hexsha

    files_in_repo = {f.name for f in bundle.path.iterdir() if f.is_file()}
    assert {"test_dag.py"} == files_in_repo
    assert_repo_is_closed(bundle)
@mock.patch("airflow.providers.git.bundles.git.GitHook")
def test_get_tag_version(self, mock_githook, git_repo):
    """A tag passed as ``version`` resolves to the tagged commit, ignoring later commits.

    NOTE(review): unlike sibling tests this one does not call
    assert_repo_is_closed — confirm whether that omission is intentional.
    """
    repo_path, repo = git_repo
    mock_githook.return_value.repo_url = repo_path
    starting_commit = repo.head.commit

    # add tag
    repo.create_tag("test")

    # Add new file to the repo
    file_path = repo_path / "new_test.py"
    with open(file_path, "w") as f:
        f.write("hello world")
    repo.index.add([file_path])
    repo.index.commit("Another commit")

    bundle = GitDagBundle(
        name="test",
        git_conn_id=CONN_HTTPS,
        version="test",
        tracking_ref=GIT_DEFAULT_BRANCH,
        prune_dotgit_folder=False,
    )
    bundle.initialize()
    assert bundle.get_current_version() == starting_commit.hexsha

    files_in_repo = {f.name for f in bundle.path.iterdir() if f.is_file()}
    assert {"test_dag.py"} == files_in_repo
@mock.patch("airflow.providers.git.bundles.git.GitHook")
def test_get_latest(self, mock_githook, git_repo):
    """Without a pinned version, initialize tracks the branch tip (newest commit)."""
    repo_path, repo = git_repo
    mock_githook.return_value.repo_url = repo_path
    starting_commit = repo.head.commit

    file_path = repo_path / "new_test.py"
    with open(file_path, "w") as f:
        f.write("hello world")
    repo.index.add([file_path])
    repo.index.commit("Another commit")

    bundle = GitDagBundle(name="test", git_conn_id=CONN_HTTPS, tracking_ref=GIT_DEFAULT_BRANCH)
    bundle.initialize()

    assert bundle.get_current_version() != starting_commit.hexsha

    files_in_repo = {f.name for f in bundle.path.iterdir() if f.is_file()}
    assert {"test_dag.py", "new_test.py"} == files_in_repo
    assert_repo_is_closed(bundle)
@mock.patch("airflow.providers.git.bundles.git.GitHook")
def test_removes_git_dir_for_versioned_bundle_by_default(self, mock_githook, git_repo):
    """By default a version-pinned bundle prunes the .git folder after checkout."""
    repo_path, repo = git_repo
    mock_githook.return_value.repo_url = repo_path
    starting_commit = repo.head.commit

    bundle = GitDagBundle(
        name="test",
        git_conn_id=CONN_HTTPS,
        version=starting_commit.hexsha,
        tracking_ref=GIT_DEFAULT_BRANCH,
    )
    bundle.initialize()

    assert not (bundle.repo_path / ".git").exists()

    files_in_repo = {f.name for f in bundle.path.iterdir() if f.is_file()}
    assert {"test_dag.py"} == files_in_repo
    assert_repo_is_closed(bundle)
@mock.patch("airflow.providers.git.bundles.git.GitHook")
def test_keeps_git_dir_when_disabled(self, mock_githook, git_repo):
    """With prune_dotgit_folder=False the .git folder survives initialization."""
    repo_path, repo = git_repo
    mock_githook.return_value.repo_url = repo_path
    starting_commit = repo.head.commit

    bundle = GitDagBundle(
        name="test",
        git_conn_id=CONN_HTTPS,
        version=starting_commit.hexsha,
        tracking_ref=GIT_DEFAULT_BRANCH,
        prune_dotgit_folder=False,
    )
    bundle.initialize()

    assert (bundle.repo_path / ".git").exists()
    assert bundle.get_current_version() == starting_commit.hexsha
    assert_repo_is_closed(bundle)
@pytest.mark.parametrize(
    "amend",
    [
        True,
        False,
    ],
)
@mock.patch("airflow.providers.git.bundles.git.GitHook")
def test_refresh(self, mock_githook, git_repo, amend):
    """Ensure that the bundle refresh works when tracking a branch, with a new commit and amending the commit"""
    repo_path, repo = git_repo
    mock_githook.return_value.repo_url = repo_path
    starting_commit = repo.head.commit

    # Identity is required so git can author commits in the test repo.
    with repo.config_writer() as writer:
        writer.set_value("user", "name", "Test User")
        writer.set_value("user", "email", "test@example.com")

    bundle = GitDagBundle(name="test", git_conn_id=CONN_HTTPS, tracking_ref=GIT_DEFAULT_BRANCH)
    bundle.initialize()

    assert bundle.get_current_version() == starting_commit.hexsha

    files_in_repo = {f.name for f in bundle.path.iterdir() if f.is_file()}
    assert {"test_dag.py"} == files_in_repo

    file_path = repo_path / "new_test.py"
    with open(file_path, "w") as f:
        f.write("hello world")
    repo.index.add([file_path])
    commit = repo.git.commit(amend=amend, message="Another commit")

    bundle.refresh()

    # repo.git.commit returns porcelain output; the abbreviated hash of the
    # bundle's new version must appear in it.
    assert bundle.get_current_version()[:6] in commit

    files_in_repo = {f.name for f in bundle.path.iterdir() if f.is_file()}
    assert {"test_dag.py", "new_test.py"} == files_in_repo
    assert_repo_is_closed(bundle)
@mock.patch("airflow.providers.git.bundles.git.GitHook")
def test_refresh_tag(self, mock_githook, git_repo):
    """Ensure that the bundle refresh works when tracking a tag"""
    repo_path, repo = git_repo
    mock_githook.return_value.repo_url = repo_path
    starting_commit = repo.head.commit

    # add tag
    repo.create_tag("test123")

    bundle = GitDagBundle(name="test", git_conn_id=CONN_HTTPS, tracking_ref="test123")
    bundle.initialize()
    assert bundle.get_current_version() == starting_commit.hexsha

    # Add new file to the repo
    file_path = repo_path / "new_test.py"
    with open(file_path, "w") as f:
        f.write("hello world")
    repo.index.add([file_path])
    commit = repo.index.commit("Another commit")

    # update tag (force-move it to the new commit)
    repo.create_tag("test123", force=True)

    bundle.refresh()

    assert bundle.get_current_version() == commit.hexsha

    files_in_repo = {f.name for f in bundle.path.iterdir() if f.is_file()}
    assert {"test_dag.py", "new_test.py"} == files_in_repo
@mock.patch("airflow.providers.git.bundles.git.GitHook")
def test_head(self, mock_githook, git_repo):
    """Tracking a branch name checks out that branch as HEAD."""
    repo_path, repo = git_repo
    mock_githook.return_value.repo_url = repo_path

    repo.create_head("test")
    bundle = GitDagBundle(name="test", git_conn_id=CONN_HTTPS, tracking_ref="test")
    bundle.initialize()
    assert bundle.repo.head.ref.name == "test"
@mock.patch("airflow.providers.git.bundles.git.GitHook")
def test_version_not_found(self, mock_githook, git_repo):
    """initialize raises AirflowException when the pinned version does not exist."""
    repo_path, repo = git_repo
    mock_githook.return_value.repo_url = repo_path
    bundle = GitDagBundle(
        name="test",
        git_conn_id=CONN_HTTPS,
        version="not_found",
        tracking_ref=GIT_DEFAULT_BRANCH,
    )

    with pytest.raises(AirflowException, match="Version not_found not found in the repository"):
        bundle.initialize()
@mock.patch("airflow.providers.git.bundles.git.GitHook")
def test_subdir(self, mock_githook, git_repo):
    """With ``subdir`` set, bundle.path points inside that subdirectory only."""
    repo_path, repo = git_repo
    mock_githook.return_value.repo_url = repo_path

    subdir = "somesubdir"
    subdir_path = repo_path / subdir
    subdir_path.mkdir()

    file_path = subdir_path / "some_new_file.py"
    with open(file_path, "w") as f:
        f.write("hello world")
    repo.index.add([file_path])
    repo.index.commit("Initial commit")

    bundle = GitDagBundle(
        name="test",
        git_conn_id=CONN_HTTPS,
        tracking_ref=GIT_DEFAULT_BRANCH,
        subdir=subdir,
    )
    bundle.initialize()

    files_in_repo = {f.name for f in bundle.path.iterdir() if f.is_file()}
    assert str(bundle.path).endswith(subdir)
    assert {"some_new_file.py"} == files_in_repo
def test_raises_when_no_repo_url(self):
    """A connection without a host URL makes initialize fail with AirflowException."""
    bundle = GitDagBundle(
        name="test",
        git_conn_id=CONN_NO_REPO_URL,
        tracking_ref=GIT_DEFAULT_BRANCH,
    )
    with pytest.raises(AirflowException, match=f"Connection {CONN_NO_REPO_URL} doesn't have a host url"):
        bundle.initialize()
@mock.patch("airflow.providers.git.bundles.git.GitHook")
@mock.patch("airflow.providers.git.bundles.git.Repo")
def test_with_path_as_repo_url(self, mock_gitRepo, mock_githook):
    """A filesystem-path repo URL is cloned (bare + working copy) and checked out."""
    bundle = GitDagBundle(
        name="test",
        git_conn_id=CONN_ONLY_PATH,
        tracking_ref=GIT_DEFAULT_BRANCH,
    )
    bundle.initialize()
    # One clone for the bare repo, one for the working copy.
    assert mock_gitRepo.clone_from.call_count == 2
    assert mock_gitRepo.return_value.git.checkout.call_count == 1
@mock.patch("airflow.providers.git.bundles.git.Repo")
def test_refresh_with_git_connection(self, mock_gitRepo):
    """initialize fetches three times; a standalone refresh fetches twice (bare + working)."""
    bundle = GitDagBundle(
        name="test",
        git_conn_id="git_default",
        tracking_ref=GIT_DEFAULT_BRANCH,
    )
    bundle.initialize()
    # 1 in _clone_bare_repo_if_required, 1 in refresh() for bare repo, 1 in refresh() for working repo
    assert mock_gitRepo.return_value.remotes.origin.fetch.call_count == 3
    mock_gitRepo.return_value.remotes.origin.fetch.reset_mock()
    bundle.refresh()
    assert mock_gitRepo.return_value.remotes.origin.fetch.call_count == 2
@pytest.mark.parametrize(
    ("repo_url", "extra_conn_kwargs", "expected_url"),
    [
        # SSH and HTTPS GitHub URLs (with/without .git suffix) map to /tree/<sha>.
        ("git@github.com:apache/airflow.git", None, "https://github.com/apache/airflow/tree/0f0f0f"),
        ("git@github.com:apache/airflow", None, "https://github.com/apache/airflow/tree/0f0f0f"),
        ("https://github.com/apache/airflow", None, "https://github.com/apache/airflow/tree/0f0f0f"),
        ("https://github.com/apache/airflow.git", None, "https://github.com/apache/airflow/tree/0f0f0f"),
        # GitLab and Bitbucket use their own path conventions.
        ("git@gitlab.com:apache/airflow.git", None, "https://gitlab.com/apache/airflow/-/tree/0f0f0f"),
        ("git@bitbucket.org:apache/airflow.git", None, "https://bitbucket.org/apache/airflow/src/0f0f0f"),
        (
            "git@myorg.github.com:apache/airflow.git",
            None,
            "https://myorg.github.com/apache/airflow/tree/0f0f0f",
        ),
        (
            "https://myorg.github.com/apache/airflow.git",
            None,
            "https://myorg.github.com/apache/airflow/tree/0f0f0f",
        ),
        # Local/file paths produce no browsable URL.
        ("/dev/null", None, None),
        ("file:///dev/null", None, None),
        # Credentials on the connection must not leak into the view URL.
        (
            "https://github.com/apache/airflow",
            {"password": "abc123"},
            "https://github.com/apache/airflow/tree/0f0f0f",
        ),
        (
            "https://github.com/apache/airflow",
            {"login": "abc123"},
            "https://github.com/apache/airflow/tree/0f0f0f",
        ),
        (
            "https://github.com/apache/airflow",
            {"login": "abc123", "password": "def456"},
            "https://github.com/apache/airflow/tree/0f0f0f",
        ),
        # Explicit ports are preserved.
        (
            "https://github.com:443/apache/airflow",
            None,
            "https://github.com:443/apache/airflow/tree/0f0f0f",
        ),
        (
            "https://github.com:443/apache/airflow",
            {"password": "abc123"},
            "https://github.com:443/apache/airflow/tree/0f0f0f",
        ),
    ],
)
@mock.patch("airflow.providers.git.bundles.git.Repo")
def test_view_url(
    self, mock_gitrepo, repo_url, extra_conn_kwargs, expected_url, create_connection_without_db
):
    """view_url maps each supported host/URL form to its web UI URL without initializing."""
    create_connection_without_db(
        Connection(
            conn_id="my_git_connection",
            host=repo_url,
            conn_type="git",
            **(extra_conn_kwargs or {}),
        )
    )
    bundle = GitDagBundle(
        name="test",
        git_conn_id="my_git_connection",
        tracking_ref="main",
    )
    # view_url must not require (or trigger) a clone.
    bundle.initialize = mock.MagicMock()
    view_url = bundle.view_url("0f0f0f")
    assert view_url == expected_url
    bundle.initialize.assert_not_called()
@pytest.mark.parametrize(
    ("repo_url", "extra_conn_kwargs", "expected_url"),
    [
        (
            "git@github.com:apache/airflow.git",
            None,
            "https://github.com/apache/airflow/tree/0f0f0f/subdir",
        ),
        ("git@github.com:apache/airflow", None, "https://github.com/apache/airflow/tree/0f0f0f/subdir"),
        (
            "https://github.com/apache/airflow",
            None,
            "https://github.com/apache/airflow/tree/0f0f0f/subdir",
        ),
        (
            "https://github.com/apache/airflow.git",
            None,
            "https://github.com/apache/airflow/tree/0f0f0f/subdir",
        ),
        (
            "git@gitlab.com:apache/airflow.git",
            None,
            "https://gitlab.com/apache/airflow/-/tree/0f0f0f/subdir",
        ),
        (
            "git@bitbucket.org:apache/airflow.git",
            None,
            "https://bitbucket.org/apache/airflow/src/0f0f0f/subdir",
        ),
        (
            "git@myorg.github.com:apache/airflow.git",
            None,
            "https://myorg.github.com/apache/airflow/tree/0f0f0f/subdir",
        ),
        (
            "https://myorg.github.com/apache/airflow.git",
            None,
            "https://myorg.github.com/apache/airflow/tree/0f0f0f/subdir",
        ),
        (
            "https://github.com/apache/airflow",
            {"password": "abc123"},
            "https://github.com/apache/airflow/tree/0f0f0f/subdir",
        ),
        (
            "https://github.com/apache/airflow",
            {"login": "abc123"},
            "https://github.com/apache/airflow/tree/0f0f0f/subdir",
        ),
        (
            "https://github.com/apache/airflow",
            {"login": "abc123", "password": "def456"},
            "https://github.com/apache/airflow/tree/0f0f0f/subdir",
        ),
        (
            "https://github.com:443/apache/airflow",
            None,
            "https://github.com:443/apache/airflow/tree/0f0f0f/subdir",
        ),
        (
            "https://github.com:443/apache/airflow",
            {"password": "abc123"},
            "https://github.com:443/apache/airflow/tree/0f0f0f/subdir",
        ),
    ],
)
@mock.patch("airflow.providers.git.bundles.git.Repo")
def test_view_url_subdir(
    self, mock_gitrepo, repo_url, extra_conn_kwargs, expected_url, create_connection_without_db
):
    """view_url appends the configured subdir to the generated web URL."""
    create_connection_without_db(
        Connection(
            conn_id="git_default",
            host=repo_url,
            conn_type="git",
            **(extra_conn_kwargs or {}),
        )
    )
    bundle = GitDagBundle(
        name="test",
        tracking_ref="main",
        subdir="subdir",
        git_conn_id="git_default",
    )
    # view_url must not require (or trigger) a clone.
    bundle.initialize = mock.MagicMock()
    view_url = bundle.view_url("0f0f0f")
    assert view_url == expected_url
    bundle.initialize.assert_not_called()
@pytest.mark.skipif(not AIRFLOW_V_3_1_PLUS, reason="Airflow 3.0 does not support view_url_template")
@pytest.mark.parametrize(
    ("repo_url", "extra_conn_kwargs", "expected_url"),
    [
        (
            "git@github.com:apache/airflow.git",
            None,
            "https://github.com/apache/airflow/tree/{version}/subdir",
        ),
        (
            "git@github.com:apache/airflow",
            None,
            "https://github.com/apache/airflow/tree/{version}/subdir",
        ),
        (
            "https://github.com/apache/airflow",
            None,
            "https://github.com/apache/airflow/tree/{version}/subdir",
        ),
        (
            "https://github.com/apache/airflow.git",
            None,
            "https://github.com/apache/airflow/tree/{version}/subdir",
        ),
        (
            "git@gitlab.com:apache/airflow.git",
            None,
            "https://gitlab.com/apache/airflow/-/tree/{version}/subdir",
        ),
        (
            "git@bitbucket.org:apache/airflow.git",
            None,
            "https://bitbucket.org/apache/airflow/src/{version}/subdir",
        ),
        (
            "git@myorg.github.com:apache/airflow.git",
            None,
            "https://myorg.github.com/apache/airflow/tree/{version}/subdir",
        ),
        (
            "https://myorg.github.com/apache/airflow.git",
            None,
            "https://myorg.github.com/apache/airflow/tree/{version}/subdir",
        ),
        (
            "https://github.com/apache/airflow",
            {"password": "abc123"},
            "https://github.com/apache/airflow/tree/{version}/subdir",
        ),
        (
            "https://github.com/apache/airflow",
            {"login": "abc123"},
            "https://github.com/apache/airflow/tree/{version}/subdir",
        ),
        (
            "https://github.com/apache/airflow",
            {"login": "abc123", "password": "def456"},
            "https://github.com/apache/airflow/tree/{version}/subdir",
        ),
        (
            "https://github.com:443/apache/airflow",
            None,
            "https://github.com:443/apache/airflow/tree/{version}/subdir",
        ),
        (
            "https://github.com:443/apache/airflow",
            {"password": "abc123"},
            "https://github.com:443/apache/airflow/tree/{version}/subdir",
        ),
    ],
)
@mock.patch("airflow.providers.git.bundles.git.Repo")
def test_view_url_template_subdir(
    self, mock_gitrepo, repo_url, extra_conn_kwargs, expected_url, create_connection_without_db
):
    """view_url_template yields a URL with a literal {version} placeholder plus the subdir."""
    create_connection_without_db(
        Connection(
            conn_id="git_default",
            host=repo_url,
            conn_type="git",
            **(extra_conn_kwargs or {}),
        )
    )
    bundle = GitDagBundle(
        name="test",
        tracking_ref="main",
        subdir="subdir",
        git_conn_id="git_default",
    )
    # Template generation must not require (or trigger) a clone.
    bundle.initialize = mock.MagicMock()
    view_url_template = bundle.view_url_template()
    assert view_url_template == expected_url
    bundle.initialize.assert_not_called()
@mock.patch("airflow.providers.git.bundles.git.Repo")
def test_view_url_returns_none_when_no_version_in_view_url(self, mock_gitrepo):
    """view_url(None) returns None rather than a versionless URL."""
    bundle = GitDagBundle(
        name="test",
        tracking_ref="main",
    )
    view_url = bundle.view_url(None)
    assert view_url is None
@mock.patch("airflow.providers.git.bundles.git.GitHook")
def test_clone_bare_repo_git_command_error(self, mock_githook):
    """A GitCommandError during the bare clone is surfaced as a RuntimeError."""
    mock_githook.return_value.repo_url = "git@github.com:apache/airflow.git"
    mock_githook.return_value.env = {}

    with mock.patch("airflow.providers.git.bundles.git.Repo.clone_from") as mock_clone:
        mock_clone.side_effect = GitCommandError("clone", "Simulated error")
        bundle = GitDagBundle(name="test", git_conn_id=CONN_HTTPS, tracking_ref="main")
        with pytest.raises(
            RuntimeError,
            match=re.escape("Error cloning repository"),
        ):
            bundle.initialize()
@mock.patch("airflow.providers.git.bundles.git.GitHook")
def test_clone_repo_no_such_path_error(self, mock_githook):
    """NoSuchPathError during clone becomes an AirflowException.

    NOTE(review): the assertion matches the literal "%s" placeholder — this
    implies the exception message is built lazily (logging-style) without
    interpolation; confirm that is intended in the implementation.
    """
    mock_githook.return_value.repo_url = "git@github.com:apache/airflow.git"
    with mock.patch("airflow.providers.git.bundles.git.os.path.exists", return_value=False):
        with mock.patch("airflow.providers.git.bundles.git.Repo.clone_from") as mock_clone:
            mock_clone.side_effect = NoSuchPathError("Path not found")
            bundle = GitDagBundle(name="test", tracking_ref="main")

            with pytest.raises(AirflowException) as exc_info:
                bundle._clone_repo_if_required()

            assert "Repository path: %s not found" in str(exc_info.value)
@patch.dict(os.environ, {"AIRFLOW_CONN_MY_TEST_GIT": '{"host": "something", "conn_type": "git"}'})
@pytest.mark.parametrize(
    ("conn_id", "expected_hook_type"),
    [("my_test_git", GitHook), ("something-else", type(None))],
)
def test_repo_url_access_missing_connection_doesnt_error(
    self, conn_id, expected_hook_type, mock_supervisor_comms
):
    """A missing connection leaves bundle.hook as None instead of raising."""
    if expected_hook_type is type(None):
        # Simulate the supervisor reporting that the connection doesn't exist.
        mock_supervisor_comms.send.return_value = ErrorResponse(error=ErrorType.CONNECTION_NOT_FOUND)

    bundle = GitDagBundle(
        name="testa",
        tracking_ref="main",
        git_conn_id=conn_id,
    )
    assert isinstance(bundle.hook, expected_hook_type)
@mock.patch("airflow.providers.git.bundles.git.GitHook")
def test_lock_used(self, mock_githook, git_repo):
    """initialize acquires the bundle lock twice (initialize itself and the refresh it performs)."""
    repo_path, repo = git_repo
    mock_githook.return_value.repo_url = repo_path
    bundle = GitDagBundle(name="test", git_conn_id=CONN_HTTPS, tracking_ref=GIT_DEFAULT_BRANCH)
    with mock.patch("airflow.providers.git.bundles.git.GitDagBundle.lock") as mock_lock:
        bundle.initialize()
        assert mock_lock.call_count == 2  # both initialize and refresh
@pytest.mark.parametrize(
    ("conn_json", "repo_url", "expected"),
    [
        # Explicit repo_url beats the connection host.
        (
            {"host": "git@github.com:apache/airflow.git"},
            "git@github.com:apache/hello.git",
            "git@github.com:apache/hello.git",
        ),
        # No repo_url: fall back to the connection host.
        ({"host": "git@github.com:apache/airflow.git"}, None, "git@github.com:apache/airflow.git"),
        # No host on the connection: repo_url is used.
        ({}, "git@github.com:apache/hello.git", "git@github.com:apache/hello.git"),
    ],
)
def test_repo_url_precedence(self, conn_json, repo_url, expected):
    """repo_url argument takes precedence over the connection's host."""
    conn_str = json.dumps(conn_json)
    with patch.dict(os.environ, {"AIRFLOW_CONN_MY_TEST_GIT": conn_str}):
        bundle = GitDagBundle(
            name="test",
            tracking_ref="main",
            git_conn_id="my_test_git",
            repo_url=repo_url,
        )
        assert bundle.repo_url == expected
@mock.patch("airflow.providers.git.bundles.git.Repo")
def test_clone_passes_env_from_githook(self, mock_gitRepo):
    """The GitHook's env (e.g. GIT_SSH_COMMAND) is forwarded to Repo.clone_from."""

    def _fake_clone_from(*_, **kwargs):
        # Fail like git would if the SSH env was not propagated.
        if "env" not in kwargs:
            raise GitCommandError("git", 128, "Permission denied")
        return types.SimpleNamespace()

    EXPECTED_ENV = {"GIT_SSH_COMMAND": "ssh -i /id_rsa -o StrictHostKeyChecking=no"}
    mock_gitRepo.clone_from.side_effect = _fake_clone_from

    # Mock needs to support the fetch operation called in _clone_bare_repo_if_required
    mock_repo_instance = mock.MagicMock()
    mock_gitRepo.return_value = mock_repo_instance

    with mock.patch("airflow.providers.git.bundles.git.GitHook") as mock_githook:
        mock_githook.return_value.repo_url = "git@github.com:apache/airflow.git"
        mock_githook.return_value.env = EXPECTED_ENV

        bundle = GitDagBundle(
            name="my_repo",
            git_conn_id="git_default",
            repo_url="git@github.com:apache/airflow.git",
            tracking_ref="main",
        )
        bundle._clone_bare_repo_if_required()

    _, kwargs = mock_gitRepo.clone_from.call_args
    assert kwargs["env"] == EXPECTED_ENV
@mock.patch("airflow.providers.git.bundles.git.GitHook")
@mock.patch("airflow.providers.git.bundles.git.shutil.rmtree")
@mock.patch("airflow.providers.git.bundles.git.os.path.exists")
def test_clone_bare_repo_invalid_repository_error_retry(self, mock_exists, mock_rmtree, mock_githook):
    """Test that InvalidGitRepositoryError triggers cleanup and retry."""
    mock_githook.return_value.repo_url = "git@github.com:apache/airflow.git"
    mock_githook.return_value.env = {}

    # Set up exists to return True for the bare repo path (simulating corrupted repo exists)
    mock_exists.return_value = True

    with mock.patch("airflow.providers.git.bundles.git.Repo") as mock_repo_class:
        # First call to Repo() raises InvalidGitRepositoryError, second call succeeds
        mock_repo_class.side_effect = [
            InvalidGitRepositoryError("Invalid git repository"),
            mock.MagicMock(),  # Second attempt succeeds
        ]
        # Mock successful clone_from for the retry attempt
        mock_repo_class.clone_from = mock.MagicMock()

        bundle = GitDagBundle(name="test", git_conn_id=CONN_HTTPS, tracking_ref="main")

        # This should not raise an exception due to retry logic
        bundle._clone_bare_repo_if_required()

        # Verify cleanup was called
        mock_rmtree.assert_called_once_with(bundle.bare_repo_path)
        # Verify Repo was called twice (failed attempt + retry)
        assert mock_repo_class.call_count == 2
    @mock.patch("airflow.providers.git.bundles.git.GitHook")
    @mock.patch("airflow.providers.git.bundles.git.shutil.rmtree")
    @mock.patch("airflow.providers.git.bundles.git.os.path.exists")
    def test_clone_bare_repo_invalid_repository_error_retry_fails(
        self, mock_exists, mock_rmtree, mock_githook
    ):
        """Test that InvalidGitRepositoryError after retry is re-raised (wrapped in AirflowException by caller).

        Note: patch decorators apply bottom-up, so the mocks arrive in the
        order (exists, rmtree, githook).
        """
        mock_githook.return_value.repo_url = "git@github.com:apache/airflow.git"
        mock_githook.return_value.env = {}
        # Set up exists to return True for the bare repo path
        mock_exists.return_value = True
        with mock.patch("airflow.providers.git.bundles.git.Repo") as mock_repo_class:
            # Both calls to Repo() raise InvalidGitRepositoryError
            mock_repo_class.side_effect = InvalidGitRepositoryError("Invalid git repository")
            bundle = GitDagBundle(name="test", git_conn_id=CONN_HTTPS, tracking_ref="main")
            # The raw exception is raised by the method itself, but wrapped by _initialize
            with pytest.raises(InvalidGitRepositoryError, match="Invalid git repository"):
                bundle._clone_bare_repo_if_required()
            # Verify cleanup was called twice (once for each failed attempt)
            assert mock_rmtree.call_count == 2
            mock_rmtree.assert_called_with(bundle.bare_repo_path)
            # Verify Repo was called twice (failed attempt + failed retry)
            assert mock_repo_class.call_count == 2
|
TestGitDagBundle
|
python
|
dask__dask
|
dask/dataframe/tseries/resample.py
|
{
"start": 6670,
"end": 6728
}
|
class ____(ResampleReduction):
    """Resample reduction applying the ``ohlc`` (open/high/low/close) aggregation."""

    # Aggregation name consumed by the ResampleReduction base class.
    how = "ohlc"
|
ResampleOhlc
|
python
|
doocs__leetcode
|
solution/3100-3199/3158.Find the XOR of Numbers Which Appear Twice/Solution2.py
|
{
"start": 0,
"end": 244
}
|
class ____:
    def duplicateNumbersXOR(self, nums: List[int]) -> int:
        """Return the XOR of every value encountered after its first sighting.

        A bitmask records which values have been seen; a repeated value is
        folded into the running XOR instead of being marked again.
        """
        seen_bits = 0
        result = 0
        for value in nums:
            if (seen_bits >> value) & 1:
                # Already seen at least once: fold into the answer.
                result ^= value
            else:
                # First sighting: mark the value's bit in the mask.
                seen_bits |= 1 << value
        return result
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/gradients_test.py
|
{
"start": 36707,
"end": 41324
}
|
class ____(test_util.TensorFlowTestCase):
  """Tests for ``custom_gradient._get_dependent_variables``.

  Each case builds a small graph and checks which trainable variables are
  discovered on the differentiable path between the inputs and outputs.
  """

  def testNoVariables(self):
    with ops.Graph().as_default():
      func = lambda x: array_ops.identity(x) + 5.0
      input_t = constant_op.constant(2.0)
      result_t = func(input_t)
      dependent_vars = custom_gradient._get_dependent_variables(
          [input_t], [result_t])
      # There are no variables.
      self.assertEqual(dependent_vars, [])

  def testVariablesOutside(self):
    with ops.Graph().as_default():
      init = constant_op.constant(100.0)
      var = variables.Variable(init)
      # The variable is closed over. It should be found.
      func = lambda x: array_ops.identity(x) + 5.0 + var
      input_t = constant_op.constant(2.0)
      result_t = func(input_t)
      dependent_vars = custom_gradient._get_dependent_variables(
          [input_t], [result_t])
      self.assertEqual(dependent_vars, [var])

  def testVariableSamePrefix(self):
    with ops.Graph().as_default():
      # Two variables whose names share a prefix; both must be found.
      var_name = "my_variable"
      v_z = variable_scope.get_variable(var_name, shape=())
      v_o = variable_scope.get_variable(var_name + "_ones", shape=())
      # The variable is closed over. It should be found.
      func = lambda x: array_ops.identity(x) + 5.0 + v_z + v_o
      input_t = constant_op.constant(2.0)
      result_t = func(input_t)
      dependent_vars = custom_gradient._get_dependent_variables(
          [input_t], [result_t])
      self.assertEqual(set(dependent_vars), set([v_o, v_z]))

  def testVariablesOutsideButDSeparated(self):
    with ops.Graph().as_default():
      init = constant_op.constant(100.0)
      var = variables.Variable(init)
      # The variable is d-separated by the inputs. It should not be found.
      input_t = array_ops.identity(var) * 5.0
      func = lambda x: array_ops.identity(x) + 5.0
      result_t = func(input_t)
      dependent_vars = custom_gradient._get_dependent_variables(
          [input_t], [result_t])
      self.assertEqual(dependent_vars, [])

  def testVariablesOutsideAndNonDifferentiable(self):
    with ops.Graph().as_default():
      init = constant_op.constant(100.0, shape=(5,))
      var = variables.Variable(init, shape=(5,))

      def _Func(x):
        # non-differentiable dependency on var.
        # the variable should not be found.
        y = array_ops.ones_like(var)
        return array_ops.identity(x) + 5.0 + y

      input_t = constant_op.constant(2.0)
      result_t = _Func(input_t)
      dependent_vars = custom_gradient._get_dependent_variables(
          [input_t], [result_t])
      self.assertEqual(dependent_vars, [])

  def testGetVariableByName(self):
    with context.graph_mode():
      init = constant_op.constant(100.0)
      var = variable_v1.VariableV1(init, name="a/replica_1")
      # Alias the variable's underlying tensor/handle under the short name "a"
      # so lookup by that name resolves to the same variable.
      if isinstance(var, ref_variable.RefVariable):
        var._variable = array_ops.identity(var, name="a")
      else:
        var._handle = array_ops.identity(var, name="a")
      var2 = custom_gradient.get_variable_by_name("a")
      self.assertEqual(var2.name, var.name)

  def testVariablesOutsideAndNonTrainable(self):
    with ops.Graph().as_default():
      init = constant_op.constant(100.0, shape=(5,))
      # Both variables are used in the function but only the trainable one
      # should be found.
      var_trainable = variables.Variable(init, shape=(5,))
      var_nontrainable = variables.Variable(init, shape=(5,), trainable=False)

      def _Func(x):
        del x
        return var_trainable + var_nontrainable

      input_t = constant_op.constant(2.0)
      result_t = _Func(input_t)
      dependent_vars = custom_gradient._get_dependent_variables(
          [input_t], [result_t])
      self.assertEqual(dependent_vars, [var_trainable])

  def testVariablesOutsideAndCustomGradient(self):
    with ops.Graph().as_default():
      init = constant_op.constant(100.0, shape=(5,))
      var = variables.Variable(init, shape=(5,))

      @custom_gradient.custom_gradient
      def _MyOnesLike(x):
        """Dummy version of ones_like which defines a gradient."""
        output = array_ops.ones_like(x)

        def _Grad(dy):
          return array_ops.identity(dy)

        return output, _Grad

      def _Func(x):
        # non-differentiable operation with custom gradient.
        # The variable should be found.
        y = _MyOnesLike(var)
        return array_ops.identity(x) + 5.0 + y

      input_t = constant_op.constant(2.0)
      result_t = _Func(input_t)
      dependent_vars = custom_gradient._get_dependent_variables(
          [input_t], [result_t])
      self.assertEqual(dependent_vars, [var])
|
GetDependentVariablesTest
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-hubspot/unit_tests/integrations/request_builders/streams.py
|
{
"start": 5490,
"end": 6481
}
|
class ____(AbstractRequestBuilder):
    """Builds HTTP requests for the HubSpot "all contacts" v1 endpoint.

    Filters and a ``vidOffset`` pagination cursor can be attached fluently
    before calling :meth:`build`.
    """

    URL = "https://api.hubapi.com/contacts/v1/lists/all/contacts/all"

    def __init__(self) -> None:
        self._filters: List[str] = []
        self._vid_offset = None

    @property
    def _count(self) -> str:
        # Page size is fixed at 100 records per request.
        return "count=100"

    def with_filter(self, filter_field: str, filter_value: Any) -> "ContactsStreamRequestBuilder":
        """Append a ``field=value`` query parameter and return self."""
        self._filters.append(f"{filter_field}={filter_value}")
        return self

    def with_vid_offset(self, vid_offset: str) -> "ContactsStreamRequestBuilder":
        """Set the pagination cursor and return self."""
        self._vid_offset = f"vidOffset={vid_offset}"
        return self

    @property
    def _query_params(self) -> List[str]:
        # Fix: previously returned a lazy ``filter`` object despite the
        # ``List[str]`` annotation (and ``build`` filtered it a second time).
        # Materialize a real list with falsy entries (unset vid_offset) dropped.
        params = [
            self._count,
            self._vid_offset,
        ]
        params.extend(self._filters)
        return [param for param in params if param]

    def build(self) -> HttpRequest:
        """Assemble the final request; ``_query_params`` is already filtered."""
        return HttpRequest(self.URL, query_params="&".join(self._query_params))
|
ContactsStreamRequestBuilder
|
python
|
pytorch__pytorch
|
benchmarks/dynamo/runner.py
|
{
"start": 38994,
"end": 43697
}
|
class ____:
    """
    Compares the most recent 2 benchmarks to find previously unflagged models
    that are now flagged.
    """

    def __init__(self, args):
        self.args = args
        # Archive index that maps stored reports to their output directories.
        self.lookup_file = os.path.join(self.args.dashboard_archive_path, "lookup.csv")
        assert os.path.exists(self.lookup_file)

    def generate_comment(self):
        """Build the "Recent Regressions" comment and write it to the output dir."""
        title = "## Recent Regressions ##\n"
        body = (
            "For each relevant compiler, we compare the most recent 2 reports "
            "(that actually run the compiler) to find previously unflagged "
            "models that are now flagged as problematic (according to the "
            "'Warnings' section).\n\n"
        )
        dtype = self.args.dtypes[0]
        device = self.args.devices[0]
        for suite in self.args.suites:
            body += f"### Regressions for {suite} ###\n"
            last2 = {}
            for compiler in self.args.flag_compilers:
                filenames = [
                    generate_csv_name(
                        self.args, dtype, suite, device, compiler, testing
                    )
                    for testing in ["performance", "accuracy"]
                ]
                compiler_last2 = find_last_2_with_filenames(
                    self.lookup_file, self.args.dashboard_archive_path, dtype, filenames
                )
                if compiler_last2 is not None:
                    # Parse both archived reports (index 0 = current, 1 = previous).
                    last2[compiler] = [
                        ParsePerformanceLogs(
                            [suite],
                            [device],
                            [dtype],
                            [compiler],
                            [compiler],
                            get_mode(self.args),
                            output_dir,
                        )
                        for output_dir in compiler_last2
                    ]
                    for state, path in zip(("Current", "Previous"), compiler_last2):
                        body += (
                            f"{state} report name (compiler: {compiler}, "
                            f"suite: {suite}): {path}\n\n"
                        )
            regressions_present = False
            for metric in [
                "accuracy",
                "speedup",
                "compilation_latency",
                "compression_ratio",
            ]:
                dfs = []
                for compiler in self.args.flag_compilers:
                    # Bug fix: ``last2`` only gains an entry when two comparable
                    # reports exist, so plain indexing (``last2[compiler]``)
                    # raised KeyError for compilers skipped above.
                    if last2.get(compiler) is None:
                        continue
                    df_cur, df_prev = (
                        last2[compiler][i].untouched_parsed_frames[suite][metric]
                        for i in (0, 1)
                    )
                    df_merge = df_cur.merge(
                        df_prev, on="name", suffixes=("_cur", "_prev")
                    )
                    flag_fn = FLAG_FNS[metric]
                    # A regression: previously not flagged, currently flagged.
                    flag = np.logical_and(
                        df_merge[compiler + "_prev"].apply(
                            lambda x: not pd.isna(x) and not flag_fn(x)
                        ),
                        df_merge[compiler + "_cur"].apply(
                            lambda x: not pd.isna(x) and flag_fn(x)
                        ),
                    )
                    df_bad = df_merge[flag]
                    dfs.append(
                        pd.DataFrame(
                            data={
                                "compiler": compiler,
                                "name": df_bad["name"],
                                "prev_status": df_bad[compiler + "_prev"],
                                "cur_status": df_bad[compiler + "_cur"],
                            }
                        )
                    )
                if not dfs:
                    continue
                df = pd.concat(dfs, axis=0)
                if df.empty:
                    continue
                regressions_present = True
                tabform = tabulate(
                    df, headers="keys", tablefmt="pretty", showindex="never"
                )
                str_io = io.StringIO()
                str_io.write("\n")
                str_io.write(f"{get_metric_title(metric)} regressions\n")
                str_io.write("~~~\n")
                str_io.write(f"{tabform}\n")
                str_io.write("~~~\n")
                body += str_io.getvalue()
            if not regressions_present:
                body += "No regressions found.\n"
        comment = generate_dropdown_comment(title, body)
        with open(f"{self.args.output_dir}/gh_metric_regression.txt", "w") as gh_fh:
            gh_fh.write(comment)
|
RegressionDetector
|
python
|
keras-team__keras
|
keras/src/layers/regularization/dropout_test.py
|
{
"start": 125,
"end": 4167
}
|
class ____(testing.TestCase):
    """Tests for the Dropout layer: basic behavior, noise shapes, and argument validation."""

    @pytest.mark.requires_trainable_backend
    def test_dropout_basics(self):
        self.run_layer_test(
            layers.Dropout,
            init_kwargs={
                "rate": 0.2,
            },
            input_shape=(2, 3),
            call_kwargs={"training": True},
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=1,
            expected_num_losses=0,
            supports_masking=True,
            assert_built_after_instantiation=True,
        )

    def test_dropout_rescaling(self):
        # Kept units are scaled by 1/(1-rate), so the mean stays ~1 and the
        # surviving values equal 2.0 for rate=0.5.
        inputs = np.ones((20, 500))
        layer = layers.Dropout(0.5, seed=1337)
        outputs = layer(inputs, training=True)
        outputs = backend.convert_to_numpy(outputs)
        self.assertAllClose(np.mean(outputs), 1.0, atol=0.02)
        self.assertAllClose(np.max(outputs), 2.0)

    def test_dropout_partial_noise_shape_dynamic(self):
        # Broadcasting over axis 1 means the same mask is applied to every
        # position along that axis.
        inputs = np.ones((20, 5, 10))
        layer = layers.Dropout(0.5, noise_shape=(None, 1, None))
        outputs = layer(inputs, training=True)
        self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])

    def test_dropout_partial_noise_shape_static(self):
        inputs = np.ones((20, 5, 10))
        layer = layers.Dropout(0.5, noise_shape=(20, 1, 10))
        outputs = layer(inputs, training=True)
        self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])

    def test_dropout_negative_rate(self):
        with self.assertRaisesRegex(
            ValueError,
            "Invalid value received for argument `rate`. "
            "Expected a float value between 0 and 1.",
        ):
            _ = layers.Dropout(rate=-0.5)

    def test_dropout_rate_greater_than_one(self):
        with self.assertRaisesRegex(
            ValueError,
            "Invalid value received for argument `rate`. "
            "Expected a float value between 0 and 1.",
        ):
            _ = layers.Dropout(rate=1.5)

    def test_validate_noise_shape_none(self):
        layer = layers.Dropout(0.5, noise_shape=None)
        self.assertIsNone(layer.noise_shape)

    def test_validate_noise_shape_integer_tuple(self):
        layer = layers.Dropout(0.5, noise_shape=(20, 1, 10))
        self.assertEqual(layer.noise_shape, (20, 1, 10))

    def test_validate_noise_shape_none_values(self):
        layer = layers.Dropout(0.5, noise_shape=(None, 1, None))
        self.assertEqual(layer.noise_shape, (None, 1, None))

    def test_validate_noise_shape_cast_to_a_tuple(self):
        # Lists are normalized to tuples on construction.
        layer = layers.Dropout(0.5, noise_shape=[20, 1, 10])
        self.assertEqual(layer.noise_shape, (20, 1, 10))
        self.assertIsInstance(layer.noise_shape, tuple)

    def test_validate_noise_shape_non_iterable(self):
        with self.assertRaisesRegex(
            ValueError,
            "Invalid value received for argument `noise_shape`. "
            "Expected a tuple or list of integers.",
        ):
            layers.Dropout(0.5, noise_shape="Invalid")

    def test_validate_noise_shape_invalid_type(self):
        with self.assertRaisesRegex(
            ValueError,
            "Invalid value received for argument `noise_shape`. "
            "Expected all elements to be integers or None.",
        ):
            layers.Dropout(0.5, noise_shape=(20, 1.5, 10))

    def test_validate_noise_shape_negative_value(self):
        with self.assertRaisesRegex(
            ValueError,
            "Invalid value received for argument `noise_shape`. "
            "Expected all dimensions to be positive integers or None.",
        ):
            layers.Dropout(0.5, noise_shape=(20, -1, 10))

    def test_validate_noise_shape_zero_value(self):
        with self.assertRaisesRegex(
            ValueError,
            "Invalid value received for argument `noise_shape`. "
            "Expected all dimensions to be positive integers or None.",
        ):
            layers.Dropout(0.5, noise_shape=(20, 0, 10))
|
DropoutTest
|
python
|
nedbat__coveragepy
|
tests/test_html.py
|
{
"start": 6869,
"end": 15191
}
|
class ____(HtmlTestHelpers, CoverageTest):
    """Tests of the HTML delta speed-ups."""

    def setUp(self) -> None:
        super().setUp()
        # At least one of our tests monkey-patches the version of coverage.py,
        # so grab it here to restore it later.
        self.real_coverage_version = coverage.__version__
        self.addCleanup(setattr, coverage, "__version__", self.real_coverage_version)
        # File names opened for writing in html.py during the latest run.
        self.files_written: set[str]

    def run_coverage(
        self,
        covargs: dict[str, Any] | None = None,
        htmlargs: dict[str, Any] | None = None,
    ) -> float:
        """Run coverage in-process for the delta tests.

        For the delta tests, we always want `source=.` and we want to track
        which files are written. `self.files_written` will be the file names
        that were opened for writing in html.py.
        """
        covargs = covargs or {}
        covargs["source"] = "."
        self.files_written = set()
        mock_open = FileWriteTracker(self.files_written).open
        with mock.patch("coverage.html.open", mock_open):
            return super().run_coverage(covargs=covargs, htmlargs=htmlargs)

    def assert_htmlcov_files_exist(self) -> None:
        """Assert that all the expected htmlcov files exist."""
        self.assert_exists("htmlcov/index.html")
        self.assert_exists("htmlcov/function_index.html")
        self.assert_exists("htmlcov/class_index.html")
        self.assert_exists("htmlcov/main_file_py.html")
        self.assert_exists("htmlcov/helper1_py.html")
        self.assert_exists("htmlcov/helper2_py.html")
        self.assert_exists("htmlcov/.gitignore")
        # Cache-busted files have random data in the name, but they should all
        # be there, and there should only be one of each.
        statics = ["style.css", "coverage_html.js", "keybd_closed.png", "favicon_32.png"]
        files = os.listdir("htmlcov")
        for static in statics:
            base, ext = os.path.splitext(static)
            busted_file_pattern = rf"{base}_cb_\w{{8}}{ext}"
            matches = [m for f in files if (m := re.fullmatch(busted_file_pattern, f))]
            assert len(matches) == 1, f"Found {len(matches)} files for {static}"

    def test_html_created(self) -> None:
        # Test basic HTML generation: files should be created.
        self.create_initial_files()
        self.run_coverage()
        self.assert_htmlcov_files_exist()

    def test_html_delta_from_source_change(self) -> None:
        # HTML generation can create only the files that have changed.
        # In this case, helper1 changes because its source is different.
        self.create_initial_files()
        self.run_coverage()
        index1 = self.get_html_index_content()
        # Now change a file (but only in a comment) and do it again.
        self.make_file(
            "helper1.py",
            """\
            def func1(x):   # A nice function
                if x % 2:
                    print("odd")
            """,
        )
        self.run_coverage()
        # Only the changed files should have been created.
        self.assert_htmlcov_files_exist()
        assert "htmlcov/index.html" in self.files_written
        assert "htmlcov/helper1_py.html" in self.files_written
        assert "htmlcov/helper2_py.html" not in self.files_written
        assert "htmlcov/main_file_py.html" not in self.files_written
        # Because the source change was only a comment, the index is the same.
        index2 = self.get_html_index_content()
        assert index1 == index2

    def test_html_delta_from_coverage_change(self) -> None:
        # HTML generation can create only the files that have changed.
        # In this case, helper1 changes because its coverage is different.
        self.create_initial_files()
        self.run_coverage()
        # Now change a file and do it again. main_file is different, and calls
        # helper1 differently.
        self.make_file(
            "main_file.py",
            """\
            import helper1, helper2
            helper1.func1(23)
            helper2.func2(23)
            """,
        )
        self.run_coverage()
        # Only the changed files should have been created.
        self.assert_htmlcov_files_exist()
        assert "htmlcov/index.html" in self.files_written
        assert "htmlcov/helper1_py.html" in self.files_written
        assert "htmlcov/helper2_py.html" not in self.files_written
        assert "htmlcov/main_file_py.html" in self.files_written

    def test_html_delta_from_settings_change(self) -> None:
        # HTML generation can create only the files that have changed.
        # In this case, everything changes because the coverage.py settings
        # have changed.
        self.create_initial_files()
        self.run_coverage(covargs=dict(omit=[]))
        index1 = self.get_html_index_content()
        self.run_coverage(covargs=dict(omit=["xyzzy*"]))
        # All the files have been reported again.
        self.assert_htmlcov_files_exist()
        assert "htmlcov/index.html" in self.files_written
        assert "htmlcov/helper1_py.html" in self.files_written
        assert "htmlcov/helper2_py.html" in self.files_written
        assert "htmlcov/main_file_py.html" in self.files_written
        index2 = self.get_html_index_content()
        assert index1 == index2

    def test_html_delta_from_coverage_version_change(self) -> None:
        # HTML generation can create only the files that have changed.
        # In this case, everything changes because the coverage.py version has
        # changed.
        self.create_initial_files()
        self.run_coverage()
        index1 = self.get_html_index_content()
        # "Upgrade" coverage.py!
        coverage.__version__ = "XYZZY"
        self.run_coverage()
        # All the files have been reported again.
        self.assert_htmlcov_files_exist()
        assert "htmlcov/index.html" in self.files_written
        assert "htmlcov/helper1_py.html" in self.files_written
        assert "htmlcov/helper2_py.html" in self.files_written
        assert "htmlcov/main_file_py.html" in self.files_written
        index2 = self.get_html_index_content()
        fixed_index2 = index2.replace("XYZZY", self.real_coverage_version)
        assert index1 == fixed_index2

    def test_file_becomes_100(self) -> None:
        self.create_initial_files()
        self.run_coverage()
        # Now change a file and do it again
        self.make_file(
            "main_file.py",
            """\
            import helper1, helper2
            # helper1 is now 100%
            helper1.func1(12)
            helper1.func1(23)
            """,
        )
        self.run_coverage(htmlargs=dict(skip_covered=True))
        # The 100% file, skipped, shouldn't be here.
        self.assert_doesnt_exist("htmlcov/helper1_py.html")

    def test_status_format_change(self) -> None:
        # A changed status.json format number invalidates the delta cache.
        self.create_initial_files()
        self.run_coverage()
        with open("htmlcov/status.json", encoding="utf-8") as status_json:
            status_data = json.load(status_json)
        assert status_data["format"] == 5
        status_data["format"] = 99
        with open("htmlcov/status.json", "w", encoding="utf-8") as status_json:
            json.dump(status_data, status_json)
        self.run_coverage()
        # All the files have been reported again.
        self.assert_htmlcov_files_exist()
        assert "htmlcov/index.html" in self.files_written
        assert "htmlcov/helper1_py.html" in self.files_written
        assert "htmlcov/helper2_py.html" in self.files_written
        assert "htmlcov/main_file_py.html" in self.files_written

    def test_dont_overwrite_gitignore(self) -> None:
        self.create_initial_files()
        self.make_file("htmlcov/.gitignore", "# ignore nothing")
        self.run_coverage()
        with open("htmlcov/.gitignore", encoding="utf-8") as fgi:
            assert fgi.read() == "# ignore nothing"

    def test_dont_write_gitignore_into_existing_directory(self) -> None:
        self.create_initial_files()
        self.make_file("htmlcov/README", "My files: don't touch!")
        self.run_coverage()
        self.assert_doesnt_exist("htmlcov/.gitignore")
        self.assert_exists("htmlcov/index.html")
|
HtmlDeltaTest
|
python
|
realpython__materials
|
python-copy/mutable_int.py
|
{
"start": 0,
"end": 287
}
|
class ____:
    """Wraps a value so that ``+=`` mutates the object in place."""

    def __init__(self, value):
        # Wrapped payload; updated in place by __iadd__.
        self.value = value

    def __iadd__(self, other):
        # Augmented assignment mutates and returns self, so the name is
        # rebound to this same object (unlike immutable int).
        self.value += other
        return self

    def __str__(self):
        return str(self.value)
if __name__ == "__main__":
    # Demo: += mutates the wrapper object rather than creating a new one.
    # NOTE(review): this refers to ``MutableInt`` while the class above is
    # shown as ``____`` in this extract — confirm the real class name.
    x = MutableInt(40)
    x += 2
    print(x)
|
MutableInt
|
python
|
getsentry__sentry
|
tests/flagpole/test_feature.py
|
{
"start": 298,
"end": 11710
}
|
class ____:
    """Tests for flagpole ``Feature``: JSON parsing, schema validation, matching, and YAML round-trips."""

    def get_is_true_context_builder(
        self, is_true_value: bool
    ) -> ContextBuilder[SimpleTestContextData]:
        # Builds an evaluation context whose ``is_true`` property is fixed.
        return ContextBuilder().add_context_transformer(lambda _data: dict(is_true=is_true_value))

    def test_feature_with_empty_segments(self) -> None:
        feature = Feature.from_feature_config_json(
            "foobar",
            """
            {
                "created_at": "2023-10-12T00:00:00.000Z",
                "owner": "test-owner",
                "segments": []
            }
            """,
        )
        assert feature.name == "foobar"
        assert feature.created_at == "2023-10-12T00:00:00.000Z"
        assert feature.owner == "test-owner"
        assert feature.segments == []
        # A feature with no segments never matches.
        assert not feature.match(EvaluationContext(dict()))

    def test_feature_with_default_rollout(self) -> None:
        feature = Feature.from_feature_config_json(
            "foo",
            """
            {
                "owner": "test-user",
                "created_at": "2023-10-12T00:00:00.000Z",
                "segments": [{
                    "name": "always_pass_segment",
                    "conditions": [{
                        "name": "Always true",
                        "property": "is_true",
                        "operator": "equals",
                        "value": true
                    }]
                }]
            }
            """,
        )
        context_builder = self.get_is_true_context_builder(is_true_value=True)
        # Omitted rollout defaults to 100.
        assert feature.segments[0].rollout == 100
        assert feature.match(context_builder.build(SimpleTestContextData()))

    def test_feature_with_rollout_zero(self) -> None:
        feature = Feature.from_feature_config_json(
            "foobar",
            """
            {
                "created_at": "2023-10-12T00:00:00.000Z",
                "owner": "test-owner",
                "segments": [
                    {
                        "name": "exclude",
                        "rollout": 0,
                        "conditions": [
                            {
                                "property": "user_email",
                                "operator": "equals",
                                "value": "nope@example.com"
                            }
                        ]
                    },
                    {
                        "name": "friends",
                        "rollout": 100,
                        "conditions": [
                            {
                                "property": "organization_slug",
                                "operator": "in",
                                "value": ["acme", "sentry"]
                            }
                        ]
                    }
                ]
            }
            """,
        )
        # Matching a rollout=0 segment short-circuits to "no match".
        exclude_user = {"user_email": "nope@example.com", "organization_slug": "acme"}
        assert not feature.match(EvaluationContext(exclude_user))
        match_user = {"user_email": "yes@example.com", "organization_slug": "acme"}
        assert feature.match(EvaluationContext(match_user))

    def test_all_conditions_in_segment(self) -> None:
        feature = Feature.from_feature_config_json(
            "foobar",
            """
            {
                "created_at": "2023-10-12T00:00:00.000Z",
                "owner": "test-owner",
                "segments": [
                    {
                        "name": "multiple conditions",
                        "rollout": 100,
                        "conditions": [
                            {
                                "property": "user_email",
                                "operator": "equals",
                                "value": "yes@example.com"
                            },
                            {
                                "property": "organization_slug",
                                "operator": "in",
                                "value": ["acme", "sentry"]
                            }
                        ]
                    }
                ]
            }
            """,
        )
        # All conditions in a segment must hold for the segment to match.
        exclude_user = {"user_email": "yes@example.com"}
        assert not feature.match(EvaluationContext(exclude_user))
        match_user = {"user_email": "yes@example.com", "organization_slug": "acme"}
        assert feature.match(EvaluationContext(match_user))

    def test_valid_with_all_nesting(self) -> None:
        feature = Feature.from_feature_config_json(
            "foobar",
            """
            {
                "created_at": "2023-10-12T00:00:00.000Z",
                "owner": "test-owner",
                "segments": [{
                    "name": "segment1",
                    "rollout": 100,
                    "conditions": [{
                        "property": "test_property",
                        "operator": "in",
                        "value": ["foobar"]
                    }]
                }]
            }
            """,
        )
        assert feature.name == "foobar"
        assert len(feature.segments) == 1
        assert feature.segments[0].name == "segment1"
        assert feature.segments[0].rollout == 100
        assert len(feature.segments[0].conditions) == 1

        condition = feature.segments[0].conditions[0]
        assert condition.property == "test_property"
        assert condition.operator
        assert condition.operator == ConditionOperatorKind.IN
        assert condition.value == ["foobar"]

        assert feature.match(EvaluationContext(dict(test_property="foobar")))
        assert not feature.match(EvaluationContext(dict(test_property="barfoo")))

    def test_invalid_json(self) -> None:
        with pytest.raises(InvalidFeatureFlagConfiguration):
            Feature.from_feature_config_json("foobar", "{")

    def test_validate_invalid_schema(self) -> None:
        # Empty segment name violates the schema's minimum length.
        config = """
        {
            "owner": "sentry",
            "created_at": "2024-05-14",
            "segments": [
                {
                    "name": "",
                    "rollout": 1,
                    "conditions": []
                }
            ]
        }
        """
        feature = Feature.from_feature_config_json("trash", config)
        with pytest.raises(jsonschema.ValidationError) as err:
            feature.validate()
        assert "is too short" in str(err)

        # "contains" is not an allowed operator.
        config = """
        {
            "owner": "sentry",
            "created_at": "2024-05-14",
            "segments": [
                {
                    "name": "allowed orgs",
                    "rollout": 1,
                    "conditions": [
                        {
                            "property": "organization_slug",
                            "operator": "contains",
                            "value": ["derp"]
                        }
                    ]
                }
            ]
        }
        """
        feature = Feature.from_feature_config_json("trash", config)
        with pytest.raises(jsonschema.ValidationError) as err:
            feature.validate()
        assert "'contains'} is not valid" in str(err)

    def test_validate_valid(self) -> None:
        config = """
        {
            "owner": "sentry",
            "created_at": "2024-05-14",
            "segments": [
                {
                    "name": "ga",
                    "rollout": 100,
                    "conditions": []
                }
            ]
        }
        """
        feature = Feature.from_feature_config_json("redpaint", config)
        assert feature.validate()

    def test_empty_string_name(self) -> None:
        with pytest.raises(InvalidFeatureFlagConfiguration) as exception:
            Feature.from_feature_config_json("", '{"segments":[]}')
        assert "Feature name is required" in str(exception)

    def test_missing_segments(self) -> None:
        with pytest.raises(InvalidFeatureFlagConfiguration) as exception:
            Feature.from_feature_config_json("foo", "{}")
        assert "Feature has no segments defined" in str(exception)

    def test_invalid_operator_condition(self) -> None:
        config = """
        {
            "owner": "sentry",
            "segments": [
                {
                    "name": "derp",
                    "conditions": [
                        {"property": "user_email", "operator": "trash", "value": 1}
                    ]
                }
            ]
        }
        """
        with pytest.raises(InvalidFeatureFlagConfiguration) as exception:
            Feature.from_feature_config_json("foo", config)
        assert "Provided config_dict is not a valid feature" in str(exception)

    def test_enabled_feature(self) -> None:
        feature = Feature.from_feature_config_json(
            "foo",
            """
            {
                "owner": "test-user",
                "created_at": "2023-10-12T00:00:00.000Z",
                "segments": [{
                    "name": "always_pass_segment",
                    "rollout": 100,
                    "conditions": [{
                        "name": "Always true",
                        "property": "is_true",
                        "operator": "equals",
                        "value": true
                    }]
                }]
            }
            """,
        )
        context_builder = self.get_is_true_context_builder(is_true_value=True)
        assert feature.match(context_builder.build(SimpleTestContextData()))

    def test_disabled_feature(self) -> None:
        # "enabled": false overrides otherwise-matching segments.
        feature = Feature.from_feature_config_json(
            "foo",
            """
            {
                "owner": "test-user",
                "enabled": false,
                "created_at": "2023-12-12T00:00:00.000Z",
                "segments": [{
                    "name": "always_pass_segment",
                    "rollout": 100,
                    "conditions": [{
                        "name": "Always true",
                        "property": "is_true",
                        "operator": "equals",
                        "value": true
                    }]
                }]
            }
            """,
        )
        context_builder = self.get_is_true_context_builder(is_true_value=True)
        assert not feature.match(context_builder.build(SimpleTestContextData()))

    def test_dump_yaml(self) -> None:
        feature = Feature.from_feature_config_json(
            "foo",
            """
            {
                "owner": "test-user",
                "created_at": "2023-12-12T00:00:00.000Z",
                "segments": [{
                    "name": "always_pass_segment",
                    "rollout": 100,
                    "conditions": [{
                        "name": "Always true",
                        "property": "is_true",
                        "operator": "equals",
                        "value": true
                    }]
                }]
            }
            """,
        )
        # JSON and YAML serializations must round-trip to the same structure.
        parsed_json = orjson.loads(feature.to_json_str())
        parsed_yaml = dict(yaml.safe_load(feature.to_yaml_str()))
        assert "foo" in parsed_yaml
        assert parsed_yaml == parsed_json

        features_from_yaml = Feature.from_bulk_yaml(feature.to_yaml_str())
        assert features_from_yaml == [feature]
|
TestParseFeatureConfig
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/declarative_automation/operands/run_operands.py
|
{
"start": 6667,
"end": 7079
}
|
class ____(NewUpdatesWithRunTagsCondition):
    """Matches when at least one candidate run id is among the matching run ids."""

    @property
    def base_name(self) -> str:
        return "any_new_update_has_run_tags"

    def match_candidate_runs(self, candidate_run_ids: Set[str], matching_run_ids: Set[str]) -> bool:
        # True as soon as the two id sets share any element.
        return not candidate_run_ids.isdisjoint(matching_run_ids)
@whitelist_for_serdes
@record
|
AnyNewUpdateHasRunTagsCondition
|
python
|
gevent__gevent
|
src/gevent/tests/test__threadpool.py
|
{
"start": 15203,
"end": 15497
}
|
class ____(TestCase):
    def test(self):
        """A size-1 pool can spawn a no-op task, yield, and be killed cleanly."""
        pool = self._makeOne(1)
        pool.spawn(noop)
        # Yield once so the spawned task gets a chance to run.
        gevent.sleep(0)
        pool.kill()
from gevent import monkey
@greentest.skipUnless(
hasattr(gevent.threadpool, 'ThreadPoolExecutor'),
"Requires ThreadPoolExecutor")
|
TestRefCount
|
python
|
joke2k__faker
|
faker/providers/user_agent/en_US/__init__.py
|
{
"start": 67,
"end": 131
}
|
class ____(UserAgentProvider):  # pragma: no cover
    """Provider stub: no overrides, inherits everything from ``UserAgentProvider``."""

    pass
|
Provider
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/hooks/cloud_sql.py
|
{
"start": 20168,
"end": 32571
}
|
class ____(LoggingMixin):
"""
Downloads and runs cloud-sql-proxy as subprocess of the Python process.
The cloud-sql-proxy needs to be downloaded and started before we can connect
to the Google Cloud SQL instance via database connection. It establishes
secure tunnel connection to the database. It authorizes using the
Google Cloud credentials that are passed by the configuration.
More details about the proxy can be found here:
https://cloud.google.com/sql/docs/mysql/sql-proxy
:param path_prefix: Unique path prefix where proxy will be downloaded and
directories created for unix sockets.
:param instance_specification: Specification of the instance to connect the
proxy to. It should be specified in the form that is described in
https://cloud.google.com/sql/docs/mysql/sql-proxy#multiple-instances in
-instances parameter (typically in the form of ``<project>:<region>:<instance>``
for UNIX socket connections and in the form of
``<project>:<region>:<instance>=tcp:<port>`` for TCP connections.
:param gcp_conn_id: Id of Google Cloud connection to use for
authentication
:param project_id: Optional id of the Google Cloud project to connect to - it overwrites
default project id taken from the Google Cloud connection.
:param sql_proxy_version: Specific version of SQL proxy to download
(for example 'v1.13'). By default, latest version is downloaded.
:param sql_proxy_binary_path: If specified, then proxy will be
used from the path specified rather than dynamically generated. This means
that if the binary is not present in that path it will also be downloaded.
"""
def __init__(
self,
path_prefix: str,
instance_specification: str,
gcp_conn_id: str = "google_cloud_default",
project_id: str = PROVIDE_PROJECT_ID,
sql_proxy_version: str | None = None,
sql_proxy_binary_path: str | None = None,
) -> None:
super().__init__()
self.path_prefix = path_prefix
if not self.path_prefix:
raise AirflowException("The path_prefix must not be empty!")
self.sql_proxy_was_downloaded = False
self.sql_proxy_version = sql_proxy_version
self.download_sql_proxy_dir = None
self.sql_proxy_process: Popen | None = None
self.instance_specification = instance_specification
self.project_id = project_id
self.gcp_conn_id = gcp_conn_id
self.command_line_parameters: list[str] = []
self.cloud_sql_proxy_socket_directory = self.path_prefix
self.sql_proxy_path = sql_proxy_binary_path or f"{self.path_prefix}_cloud_sql_proxy"
self.credentials_path = self.path_prefix + "_credentials.json"
self._build_command_line_parameters()
def _build_command_line_parameters(self) -> None:
self.command_line_parameters.extend(["-dir", self.cloud_sql_proxy_socket_directory])
self.command_line_parameters.extend(["-instances", self.instance_specification])
@staticmethod
def _is_os_64bit() -> bool:
return platform.machine().endswith("64")
def _download_sql_proxy_if_needed(self) -> None:
if os.path.isfile(self.sql_proxy_path):
self.log.info("cloud-sql-proxy is already present")
return
download_url = self._get_sql_proxy_download_url()
proxy_path_tmp = self.sql_proxy_path + ".tmp"
self.log.info("Downloading cloud_sql_proxy from %s to %s", download_url, proxy_path_tmp)
# httpx has a breaking API change (follow_redirects vs allow_redirects)
# and this should work with both versions (cf. issue #20088)
if "follow_redirects" in signature(httpx.get).parameters.keys():
response = httpx.get(download_url, follow_redirects=True)
else:
response = httpx.get(download_url, allow_redirects=True) # type: ignore[call-arg]
# Downloading to .tmp file first to avoid case where partially downloaded
# binary is used by parallel operator which uses the same fixed binary path
with open(proxy_path_tmp, "wb") as file:
file.write(response.content)
if response.status_code != 200:
raise AirflowException(
"The cloud-sql-proxy could not be downloaded. "
f"Status code = {response.status_code}. Reason = {response.reason_phrase}"
)
self.log.info("Moving sql_proxy binary from %s to %s", proxy_path_tmp, self.sql_proxy_path)
shutil.move(proxy_path_tmp, self.sql_proxy_path)
os.chmod(self.sql_proxy_path, 0o744) # Set executable bit
self.sql_proxy_was_downloaded = True
def _get_sql_proxy_download_url(self):
system = platform.system().lower()
processor = os.uname().machine
if processor == "x86_64":
processor = "amd64"
elif processor == "aarch64":
processor = "arm64"
if not self.sql_proxy_version:
download_url = CLOUD_SQL_PROXY_DOWNLOAD_URL.format(system, processor)
else:
if not CLOUD_SQL_PROXY_VERSION_REGEX.match(self.sql_proxy_version):
raise ValueError(
"The sql_proxy_version should match the regular expression "
f"{CLOUD_SQL_PROXY_VERSION_REGEX.pattern}"
)
download_url = CLOUD_SQL_PROXY_VERSION_DOWNLOAD_URL.format(
self.sql_proxy_version, system, processor
)
return download_url
    def _get_credential_parameters(self) -> list[str]:
        """Build the credential-related CLI flags for cloud-sql-proxy.

        Prefers an on-disk key file from the connection extras; otherwise
        materializes ``keyfile_dict`` into this runner's credentials file;
        if neither is supplied, falls back to the default activated account.
        """
        extras = GoogleBaseHook.get_connection(conn_id=self.gcp_conn_id).extra_dejson
        key_path = get_field(extras, "key_path")
        keyfile_dict = get_field(extras, "keyfile_dict")
        if key_path:
            credential_params = ["-credential_file", key_path]
        elif keyfile_dict:
            # keyfile_dict may arrive either as a dict or a JSON string.
            keyfile_content = keyfile_dict if isinstance(keyfile_dict, dict) else json.loads(keyfile_dict)
            self.log.info("Saving credentials to %s", self.credentials_path)
            with open(self.credentials_path, "w") as file:
                json.dump(keyfile_content, file)
            credential_params = ["-credential_file", self.credentials_path]
        else:
            self.log.info(
                "The credentials are not supplied by neither key_path nor "
                "keyfile_dict of the gcp connection %s. Falling back to "
                "default activated account",
                self.gcp_conn_id,
            )
            credential_params = []

        if not self.instance_specification:
            # No explicit instance: forwarding all instances requires a project
            # id, taken from the operator first, then the connection extras.
            project_id = get_field(extras, "project")
            if self.project_id:
                project_id = self.project_id
            if not project_id:
                raise AirflowException(
                    "For forwarding all instances, the project id "
                    "for Google Cloud should be provided either "
                    "by project_id extra in the Google Cloud connection or by "
                    "project_id provided in the operator."
                )
            credential_params.extend(["-projects", project_id])
        return credential_params
    def start_proxy(self) -> None:
        """
        Start Cloud SQL Proxy.

        You have to remember to stop the proxy if you started it!
        """
        self._download_sql_proxy_if_needed()
        if self.sql_proxy_process:
            raise AirflowException(f"The sql proxy is already running: {self.sql_proxy_process}")
        command_to_run = [self.sql_proxy_path]
        command_to_run.extend(self.command_line_parameters)
        self.log.info("Creating directory %s", self.cloud_sql_proxy_socket_directory)
        Path(self.cloud_sql_proxy_socket_directory).mkdir(parents=True, exist_ok=True)
        command_to_run.extend(self._get_credential_parameters())
        self.log.info("Running the command: `%s`", " ".join(command_to_run))
        self.sql_proxy_process = Popen(command_to_run, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        self.log.info("The pid of cloud_sql_proxy: %s", self.sql_proxy_process.pid)
        # Block until the proxy reports readiness on stderr; the loop exits by
        # returning on success or raising on a failed/early-exiting process.
        while True:
            line = (
                self.sql_proxy_process.stderr.readline().decode("utf-8")
                if self.sql_proxy_process.stderr
                else ""
            )
            return_code = self.sql_proxy_process.poll()
            # An empty line together with a set return code means the process
            # exited before it ever became ready.
            if line == "" and return_code is not None:
                self.sql_proxy_process = None
                raise AirflowException(f"The cloud_sql_proxy finished early with return code {return_code}!")
            if line != "":
                self.log.info(line)
            # Known error markers on stderr indicate a failed start; stop the
            # process and clean up before raising.
            if "googleapi: Error" in line or "invalid instance name:" in line:
                self.stop_proxy()
                raise AirflowException(f"Error when starting the cloud_sql_proxy {line}!")
            if "Ready for new connections" in line:
                return
    def stop_proxy(self) -> None:
        """
        Stop running proxy.

        You should stop the proxy after you stop using it.
        """
        if not self.sql_proxy_process:
            raise AirflowException("The sql proxy is not started yet")
        self.log.info("Stopping the cloud_sql_proxy pid: %s", self.sql_proxy_process.pid)
        self.sql_proxy_process.kill()
        self.sql_proxy_process = None
        # Cleanup!
        self.log.info("Removing the socket directory: %s", self.cloud_sql_proxy_socket_directory)
        shutil.rmtree(self.cloud_sql_proxy_socket_directory, ignore_errors=True)
        if self.sql_proxy_was_downloaded:
            self.log.info("Removing downloaded proxy: %s", self.sql_proxy_path)
            # Silently ignore if the file has already been removed (concurrency)
            try:
                os.remove(self.sql_proxy_path)
            except OSError as e:
                # Only ENOENT (already gone) is tolerated; re-raise anything else.
                if e.errno != errno.ENOENT:
                    raise
        else:
            self.log.info("Skipped removing proxy - it was not downloaded: %s", self.sql_proxy_path)
        if os.path.isfile(self.credentials_path):
            self.log.info("Removing generated credentials file %s", self.credentials_path)
            # Here file cannot be deleted by concurrent task (each task has its own copy)
            os.remove(self.credentials_path)
def get_proxy_version(self) -> str | None:
"""Return version of the Cloud SQL Proxy."""
self._download_sql_proxy_if_needed()
command_to_run = [self.sql_proxy_path]
command_to_run.extend(["--version"])
command_to_run.extend(self._get_credential_parameters())
result = subprocess.check_output(command_to_run).decode("utf-8")
matched = re.search("[Vv]ersion (.*?);", result)
if matched:
return matched.group(1)
return None
def get_socket_path(self) -> str:
"""
Retrieve UNIX socket path used by Cloud SQL Proxy.
:return: The dynamically generated path for the socket created by the proxy.
"""
return self.cloud_sql_proxy_socket_directory + "/" + self.instance_specification
# Connection URI templates keyed first by database type ("postgres"/"mysql"),
# then by connection kind ("proxy" via the local cloud-sql-proxy, "public"
# via direct IP), then by transport/SSL variant. The placeholders are filled
# in via str.format().
CONNECTION_URIS: dict[str, dict[str, dict[str, str]]] = {
    "postgres": {
        "proxy": {
            "tcp": "postgresql://{user}:{password}@127.0.0.1:{proxy_port}/{database}",
            "socket": "postgresql://{user}:{password}@{socket_path}/{database}",
        },
        "public": {
            "ssl": "postgresql://{user}:{password}@{public_ip}:{public_port}/{database}?"
            "sslmode=verify-ca&"
            "sslcert={client_cert_file}&"
            "sslkey={client_key_file}&"
            "sslrootcert={server_ca_file}",
            "non-ssl": "postgresql://{user}:{password}@{public_ip}:{public_port}/{database}",
        },
    },
    "mysql": {
        "proxy": {
            "tcp": "mysql://{user}:{password}@127.0.0.1:{proxy_port}/{database}",
            "socket": "mysql://{user}:{password}@localhost/{database}?unix_socket={socket_path}",
        },
        "public": {
            "ssl": "mysql://{user}:{password}@{public_ip}:{public_port}/{database}?ssl={ssl_spec}",
            "non-ssl": "mysql://{user}:{password}@{public_ip}:{public_port}/{database}",
        },
    },
}

# Database types supported by the Cloud SQL connection handling above.
CLOUD_SQL_VALID_DATABASE_TYPES = ["postgres", "mysql"]
|
CloudSqlProxyRunner
|
python
|
celery__celery
|
celery/worker/consumer/delayed_delivery.py
|
{
"start": 1307,
"end": 10127
}
|
class ____(bootsteps.StartStopStep):
    """Bootstep that sets up native delayed delivery functionality.

    This component handles the setup and configuration of native delayed delivery
    for Celery workers. It is automatically included when quorum queues are
    detected in the application configuration.

    Responsibilities:
        - Declaring native delayed delivery exchanges and queues
        - Binding all application queues to the delayed delivery exchanges
        - Handling connection failures gracefully with retries
        - Validating configuration settings
    """

    requires = (Tasks,)

    def include_if(self, c: Consumer) -> bool:
        """Determine if this bootstep should be included.

        Args:
            c: The Celery consumer instance

        Returns:
            bool: True if quorum queues are detected, False otherwise
        """
        return detect_quorum_queues(c.app, c.app.connection_for_write().transport.driver_type)[0]

    def start(self, c: Consumer) -> None:
        """Initialize delayed delivery for all broker URLs.

        Attempts to set up delayed delivery for each broker URL in the configuration.
        Failures are logged but don't prevent attempting remaining URLs.

        Args:
            c: The Celery consumer instance

        Raises:
            ValueError: If configuration validation fails
        """
        app: Celery = c.app

        try:
            self._validate_configuration(app)
        except ValueError as e:
            logger.critical("Configuration validation failed: %s", str(e))
            raise

        broker_urls = self._validate_broker_urls(app.conf.broker_url)
        setup_errors = []

        for broker_url in broker_urls:
            try:
                # Each URL gets its own bounded retry loop; a persistent
                # failure on one URL does not abort the others.
                retry_over_time(
                    self._setup_delayed_delivery,
                    args=(c, broker_url),
                    catch=RETRIED_EXCEPTIONS,
                    errback=self._on_retry,
                    interval_start=RETRY_INTERVAL,
                    max_retries=MAX_RETRIES,
                )
            except Exception as e:
                logger.warning(
                    "Failed to setup delayed delivery for %r: %s",
                    maybe_sanitize_url(broker_url), str(e)
                )
                setup_errors.append((broker_url, e))

        # Only if *every* URL failed is the feature considered unavailable.
        if len(setup_errors) == len(broker_urls):
            logger.critical(
                "Failed to setup delayed delivery for all broker URLs. "
                "Native delayed delivery will not be available."
            )

    def _setup_delayed_delivery(self, c: Consumer, broker_url: str) -> None:
        """Set up delayed delivery for a specific broker URL.

        Args:
            c: The Celery consumer instance
            broker_url: The broker URL to configure

        Raises:
            ConnectionRefusedError: If connection to the broker fails
            OSError: If there are network-related issues
            Exception: For other unexpected errors during setup
        """
        with c.app.connection_for_write(url=broker_url) as connection:
            queue_type = c.app.conf.broker_native_delayed_delivery_queue_type
            logger.debug(
                "Setting up delayed delivery for broker %r with queue type %r",
                maybe_sanitize_url(broker_url), queue_type
            )

            try:
                declare_native_delayed_delivery_exchanges_and_queues(
                    connection,
                    queue_type
                )
            except Exception as e:
                logger.warning(
                    "Failed to declare exchanges and queues for %r: %s",
                    maybe_sanitize_url(broker_url), str(e)
                )
                raise

            try:
                self._bind_queues(c.app, connection)
            except Exception as e:
                logger.warning(
                    "Failed to bind queues for %r: %s",
                    maybe_sanitize_url(broker_url), str(e)
                )
                raise

    def _bind_queues(self, app: Celery, connection: Connection) -> None:
        """Bind all application queues to delayed delivery exchanges.

        Args:
            app: The Celery application instance
            connection: The broker connection to use

        Raises:
            Exception: If queue binding fails
        """
        queues: ValuesView[Queue] = app.amqp.queues.values()
        if not queues:
            logger.warning("No queues found to bind for delayed delivery")
            return

        exceptions: list[Exception] = []
        for queue in queues:
            try:
                logger.debug("Binding queue %r to delayed delivery exchange", queue.name)
                bind_queue_to_native_delayed_delivery_exchange(connection, queue)
            except Exception as e:
                logger.error(
                    "Failed to bind queue %r: %s",
                    queue.name, str(e)
                )
                # We must re-raise on retried exceptions to ensure they are
                # caught with the outer retry_over_time mechanism.
                #
                # This could be removed if one of:
                #   * The minimum python version for Celery and Kombu is
                #     increased to 3.11. Kombu updated to use the `except*`
                #     clause to catch specific exceptions from an ExceptionGroup.
                #   * Kombu's retry_over_time utility is updated to use the
                #     catch utility from agronholm's exceptiongroup backport.
                if isinstance(e, RETRIED_EXCEPTIONS):
                    raise
                exceptions.append(e)
        # Non-retriable failures are aggregated and raised together so no
        # queue's error is silently dropped.
        if exceptions:
            raise ExceptionGroup(
                ("One or more failures occurred while binding queues to "
                 "delayed delivery exchanges"),
                exceptions,
            )

    def _on_retry(self, exc: Exception, interval_range: Iterator[float], intervals_count: int) -> float:
        """Callback for retry attempts.

        Args:
            exc: The exception that triggered the retry
            interval_range: An iterator which returns the time in seconds to sleep next
            intervals_count: Number of retry attempts so far
        """
        interval = next(interval_range)
        logger.warning(
            "Retrying delayed delivery setup (attempt %d/%d) after error: %s. Sleeping %.2f seconds.",
            intervals_count + 1, MAX_RETRIES, str(exc), interval
        )
        return interval

    def _validate_configuration(self, app: Celery) -> None:
        """Validate all required configuration settings.

        Args:
            app: The Celery application instance

        Raises:
            ValueError: If any configuration is invalid
        """
        # Validate broker URLs
        self._validate_broker_urls(app.conf.broker_url)

        # Validate queue type
        self._validate_queue_type(app.conf.broker_native_delayed_delivery_queue_type)

    def _validate_broker_urls(self, broker_urls: Union[str, List[str]]) -> Set[str]:
        """Validate and split broker URLs.

        Args:
            broker_urls: Broker URLs, either as a semicolon-separated string
                or as a list of strings

        Returns:
            Set of valid broker URLs

        Raises:
            ValueError: If no valid broker URLs are found or if invalid URLs are provided
        """
        if not broker_urls:
            raise ValueError("broker_url configuration is empty")

        if isinstance(broker_urls, str):
            brokers = broker_urls.split(";")
        elif isinstance(broker_urls, list):
            if not all(isinstance(url, str) for url in broker_urls):
                raise ValueError("All broker URLs must be strings")
            brokers = broker_urls
        else:
            raise ValueError(f"broker_url must be a string or list, got {broker_urls!r}")

        # NOTE(review): this set-build does not filter out empty strings
        # (e.g. from "a;;b"), so "valid" here means only "present" — confirm
        # whether empty entries should be rejected upstream.
        valid_urls = {url for url in brokers}
        if not valid_urls:
            raise ValueError("No valid broker URLs found in configuration")

        return valid_urls

    def _validate_queue_type(self, queue_type: Optional[str]) -> None:
        """Validate the queue type configuration.

        Args:
            queue_type: The configured queue type

        Raises:
            ValueError: If queue type is invalid
        """
        if not queue_type:
            raise ValueError("broker_native_delayed_delivery_queue_type is not configured")

        if queue_type not in VALID_QUEUE_TYPES:
            sorted_types = sorted(VALID_QUEUE_TYPES)
            raise ValueError(
                f"Invalid queue type {queue_type!r}. Must be one of: {', '.join(sorted_types)}"
            )
|
DelayedDelivery
|
python
|
sqlalchemy__sqlalchemy
|
test/ext/test_mutable.py
|
{
"start": 21151,
"end": 25747
}
|
class ____(_MutableSetTestFixture):
    """Exercises mutation tracking on a mutable-set-typed mapped attribute.

    Each test mutates ``Foo.data`` through a different set operation (in-place
    method or augmented-assignment operator), commits, and asserts the change
    was detected and persisted by the mutation-tracking extension.
    """

    run_define_tables = "each"

    # NOTE(review): takes ``cls`` but carries no @classmethod decorator —
    # presumably the fixture base invokes it as a classmethod hook; confirm
    # against _MutableSetTestFixture.
    def setup_mappers(cls):
        foo = cls.tables.foo

        cls.mapper_registry.map_imperatively(Foo, foo)

    def test_coerce_none(self):
        sess = fixture_session()
        f1 = Foo(data=None)
        sess.add(f1)
        sess.commit()
        eq_(f1.data, None)

    def test_coerce_raise(self):
        assert_raises_message(
            ValueError,
            "Attribute 'data' does not accept objects of type",
            Foo,
            data=[1, 2, 3],
        )

    def test_clear(self):
        sess = fixture_session()

        f1 = Foo(data={1, 2})
        sess.add(f1)
        sess.commit()

        f1.data.clear()
        sess.commit()

        eq_(f1.data, set())

    def test_pop(self):
        sess = fixture_session()

        f1 = Foo(data={1})
        sess.add(f1)
        sess.commit()

        eq_(f1.data.pop(), 1)
        sess.commit()

        # popping from a now-empty set raises KeyError
        assert_raises(KeyError, f1.data.pop)

        eq_(f1.data, set())

    def test_add(self):
        sess = fixture_session()

        f1 = Foo(data={1, 2})
        sess.add(f1)
        sess.commit()

        f1.data.add(5)
        sess.commit()

        eq_(f1.data, {1, 2, 5})

    def test_update(self):
        sess = fixture_session()

        f1 = Foo(data={1, 2})
        sess.add(f1)
        sess.commit()

        f1.data.update({2, 5})
        sess.commit()

        eq_(f1.data, {1, 2, 5})

    def test_binary_update(self):
        sess = fixture_session()

        f1 = Foo(data={1, 2})
        sess.add(f1)
        sess.commit()

        # |= must be intercepted just like .update()
        f1.data |= {2, 5}
        sess.commit()

        eq_(f1.data, {1, 2, 5})

    def test_intersection_update(self):
        sess = fixture_session()

        f1 = Foo(data={1, 2})
        sess.add(f1)
        sess.commit()

        f1.data.intersection_update({2, 5})
        sess.commit()

        eq_(f1.data, {2})

    def test_binary_intersection_update(self):
        sess = fixture_session()

        f1 = Foo(data={1, 2})
        sess.add(f1)
        sess.commit()

        f1.data &= {2, 5}
        sess.commit()

        eq_(f1.data, {2})

    def test_difference_update(self):
        sess = fixture_session()

        f1 = Foo(data={1, 2})
        sess.add(f1)
        sess.commit()

        f1.data.difference_update({2, 5})
        sess.commit()

        eq_(f1.data, {1})

    def test_operator_difference_update(self):
        sess = fixture_session()

        f1 = Foo(data={1, 2})
        sess.add(f1)
        sess.commit()

        f1.data -= {2, 5}
        sess.commit()

        eq_(f1.data, {1})

    def test_symmetric_difference_update(self):
        sess = fixture_session()

        f1 = Foo(data={1, 2})
        sess.add(f1)
        sess.commit()

        f1.data.symmetric_difference_update({2, 5})
        sess.commit()

        eq_(f1.data, {1, 5})

    def test_binary_symmetric_difference_update(self):
        sess = fixture_session()

        f1 = Foo(data={1, 2})
        sess.add(f1)
        sess.commit()

        f1.data ^= {2, 5}
        sess.commit()

        eq_(f1.data, {1, 5})

    def test_remove(self):
        sess = fixture_session()

        f1 = Foo(data={1, 2, 3})
        sess.add(f1)
        sess.commit()

        f1.data.remove(2)
        sess.commit()

        eq_(f1.data, {1, 3})

    def test_discard(self):
        sess = fixture_session()

        f1 = Foo(data={1, 2, 3})
        sess.add(f1)
        sess.commit()

        f1.data.discard(2)
        sess.commit()

        eq_(f1.data, {1, 3})

        # discarding an absent element is a no-op, not an error
        f1.data.discard(2)
        sess.commit()

        eq_(f1.data, {1, 3})

    def test_pickle_parent(self):
        sess = fixture_session()

        f1 = Foo(data={1, 2})
        sess.add(f1)
        sess.commit()
        f1.data
        sess.close()

        for loads, dumps in picklers():
            sess = fixture_session()
            f2 = loads(dumps(f1))
            sess.add(f2)
            f2.data.add(3)
            # mutation tracking must survive a pickle round-trip
            assert f2 in sess.dirty

    def test_unrelated_flush(self):
        sess = fixture_session()
        f1 = Foo(data={1, 2}, unrelated_data="unrelated")
        sess.add(f1)
        sess.flush()
        f1.unrelated_data = "unrelated 2"
        sess.flush()
        f1.data.add(3)
        sess.commit()
        eq_(f1.data, {1, 2, 3})

    def test_copy(self):
        f1 = Foo(data={1, 2})
        f1.data = copy.copy(f1.data)
        eq_(f1.data, {1, 2})

    def test_deepcopy(self):
        f1 = Foo(data={1, 2})
        f1.data = copy.deepcopy(f1.data)
        eq_(f1.data, {1, 2})
|
_MutableSetTestBase
|
python
|
vyperlang__vyper
|
vyper/compiler/input_bundle.py
|
{
"start": 5888,
"end": 7287
}
|
class ____(InputBundle):
    """Input bundle that resolves and reads source files from the local filesystem."""

    def _normalize_path(self, path: Path) -> Path:
        # normalize the path with os.path.normpath, to break down
        # things like "foo/bar/../x.vy" => "foo/x.vy", with all
        # the caveats around symlinks that os.path.normpath comes with.
        try:
            return path.resolve(strict=True)
        except (FileNotFoundError, NotADirectoryError):
            # translate OS-level lookup failures into the bundle's internal
            # "not found" signal so the search machinery can continue.
            raise _NotFound(path)

    def _load_from_path(self, resolved_path: Path, original_path: Path) -> CompilerInput:
        try:
            with resolved_path.open() as f:
                code = f.read()
        except (FileNotFoundError, NotADirectoryError):
            raise _NotFound(resolved_path)

        source_id = super()._generate_source_id(resolved_path)

        return FileInput(source_id, original_path, resolved_path, code)
# wrap os.path.normpath, but return the same type as the input -
# but use posixpath instead so that things work cross-platform.
def _normpath(path):
cls = path.__class__
if not isinstance(path, str):
path = path.as_posix()
return cls(posixpath.normpath(path))
# fake filesystem for "standard JSON" (aka solc-style) inputs. takes search
# paths, and `load_file()` "reads" the file from the JSON input. Note that this
# input bundle type never actually interacts with the filesystem -- it is
# guaranteed to be pure!
|
FilesystemInputBundle
|
python
|
astropy__astropy
|
astropy/utils/console.py
|
{
"start": 10224,
"end": 22540
}
|
class ____:
    """
    A class to display a progress bar in the terminal.

    It is designed to be used either with the ``with`` statement::

        with ProgressBar(len(items)) as bar:
            for item in enumerate(items):
                bar.update()

    or as a generator::

        for item in ProgressBar(items):
            item.process()
    """

    def __init__(self, total_or_items, ipython_widget=False, file=None):
        """
        Parameters
        ----------
        total_or_items : int or sequence
            If an int, the number of increments in the process being
            tracked.  If a sequence, the items to iterate over.

        ipython_widget : bool, optional
            If `True`, the progress bar will display as an IPython
            notebook widget.

        file : :term:`file-like (writeable)`, optional
            The file to write the progress bar to.  Defaults to
            `sys.stdout`.  If ``file`` is not a tty (as determined by
            calling its `isatty` member, if any, or special case hacks
            to detect the IPython console), the progress bar will be
            completely silent.
        """
        if file is None:
            file = sys.stdout

        # Non-tty output (and no widget requested): swap in a no-op update
        # so the bar is completely silent.
        if not ipython_widget and not isatty(file):
            self.update = self._silent_update
            self._silent = True
        else:
            self._silent = False

        if np.iterable(total_or_items):
            self._items = iter(total_or_items)
            self._total = len(total_or_items)
        else:
            try:
                self._total = int(total_or_items)
            except TypeError:
                raise TypeError("First argument must be int or sequence")
            else:
                self._items = iter(range(self._total))

        self._file = file
        self._start_time = time.time()
        self._human_total = human_file_size(self._total)
        self._ipython_widget = ipython_widget

        self._signal_set = False
        if not ipython_widget:
            # Track terminal resizes so the bar width stays correct.
            self._should_handle_resize = _CAN_RESIZE_TERMINAL and self._file.isatty()
            self._handle_resize()
            if self._should_handle_resize:
                signal.signal(signal.SIGWINCH, self._handle_resize)
                self._signal_set = True

        self.update(0)

    def _handle_resize(self, signum=None, frame=None):
        # 37 columns are reserved for the counters/percentage/ETA text.
        # NOTE(review): on very narrow terminals this can go negative — confirm
        # whether a lower bound is needed.
        terminal_width = get_terminal_size().columns
        self._bar_length = terminal_width - 37

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if not self._silent:
            if exc_type is None:
                # Clean exit: snap the bar to 100%.
                self.update(self._total)
            self._file.write("\n")
            self._file.flush()
            if self._signal_set:
                signal.signal(signal.SIGWINCH, signal.SIG_DFL)

    def __iter__(self):
        return self

    def __next__(self):
        try:
            rv = next(self._items)
        except StopIteration:
            self.__exit__(None, None, None)
            raise
        else:
            self.update()

        return rv

    def update(self, value=None):
        """
        Update progress bar via the console or notebook accordingly.
        """
        # Update self.value
        if value is None:
            value = self._current_value + 1
        self._current_value = value

        # Choose the appropriate environment
        if self._ipython_widget:
            self._update_ipython_widget(value)
        else:
            self._update_console(value)

    def _update_console(self, value=None):
        """
        Update the progress bar to the given value (out of the total
        given to the constructor).
        """
        if self._total == 0:
            frac = 1.0
        else:
            frac = float(value) / float(self._total)

        file = self._file
        write = file.write

        if frac > 1:
            bar_fill = int(self._bar_length)
        else:
            bar_fill = int(float(self._bar_length) * frac)
        # Redraw in place: carriage return, filled portion, arrow, remainder.
        write("\r|")
        color_print("=" * bar_fill, "blue", file=file, end="")
        if bar_fill < self._bar_length:
            color_print(">", "green", file=file, end="")
            write("-" * (self._bar_length - bar_fill - 1))
        write("|")

        if value >= self._total:
            t = time.time() - self._start_time
            prefix = " "
        elif value <= 0:
            t = None
            prefix = ""
        else:
            # Remaining-time estimate from elapsed time and completed fraction.
            t = ((time.time() - self._start_time) * (1.0 - frac)) / frac
            prefix = " ETA "
        write(f" {human_file_size(value):>4s}/{self._human_total:>4s}")
        write(f" ({frac:>6.2%})")
        write(prefix)
        if t is not None:
            write(human_time(t))
        self._file.flush()

    def _update_ipython_widget(self, value=None):
        """
        Update the progress bar to the given value (out of a total
        given to the constructor).

        This method is for use in the IPython notebook 2+.
        """
        # Create and display an empty progress bar widget,
        # if none exists.
        if not hasattr(self, "_widget"):
            # Import only if an IPython widget, i.e., widget in iPython NB
            if not HAS_IPYWIDGETS:
                raise ModuleNotFoundError("ipywidgets is not installed")

            from ipywidgets import widgets

            self._widget = widgets.FloatProgress()
            from IPython.display import display

            display(self._widget)
            self._widget.value = 0

        # Calculate percent completion, and update progress bar
        frac = value / self._total
        self._widget.value = frac * 100
        self._widget.description = f" ({frac:>6.2%})"

    def _silent_update(self, value=None):
        # No-op used when output is not a tty.
        pass

    @classmethod
    def map(
        cls,
        function,
        items,
        multiprocess=False,
        file=None,
        step=100,
        ipython_widget=False,
        multiprocessing_start_method=None,
    ):
        """Map function over items while displaying a progress bar with percentage complete.

        The map operation may run in arbitrary order on the items, but the results are
        returned in sequential order.

        ::

            def work(i):
                print(i)

            ProgressBar.map(work, range(50))

        Parameters
        ----------
        function : function
            Function to call for each step

        items : sequence
            Sequence where each element is a tuple of arguments to pass to
            *function*.

        multiprocess : bool, int, optional
            If `True`, use the `multiprocessing` module to distribute each task
            to a different processor core.  If a number greater than 1, then use
            that number of cores.

        ipython_widget : bool, optional
            If `True`, the progress bar will display as an IPython
            notebook widget.

        file : :term:`file-like (writeable)`, optional
            The file to write the progress bar to.  Defaults to
            `sys.stdout`.  If ``file`` is not a tty (as determined by
            calling its `isatty` member, if any), the scrollbar will
            be completely silent.

        step : int, optional
            Update the progress bar at least every *step* steps (default: 100).
            If ``multiprocess`` is `True`, this will affect the size
            of the chunks of ``items`` that are submitted as separate tasks
            to the process pool.  A large step size may make the job
            complete faster if ``items`` is very long.

        multiprocessing_start_method : str, optional
            Useful primarily for testing; if in doubt leave it as the default.
            When using multiprocessing, certain anomalies occur when starting
            processes with the "spawn" method (the only option on Windows);
            other anomalies occur with the "fork" method (the default on
            Linux).
        """
        if multiprocess:
            # Tag each item with its index so sequential order can be
            # reconstructed after the unordered map.
            function = _mapfunc(function)
            items = list(enumerate(items))

        results = cls.map_unordered(
            function,
            items,
            multiprocess=multiprocess,
            file=file,
            step=step,
            ipython_widget=ipython_widget,
            multiprocessing_start_method=multiprocessing_start_method,
        )

        if multiprocess:
            # Sort by the original index, then drop it.
            _, results = zip(*sorted(results))
            results = list(results)

        return results

    @classmethod
    def map_unordered(
        cls,
        function,
        items,
        multiprocess=False,
        file=None,
        step=100,
        ipython_widget=False,
        multiprocessing_start_method=None,
    ):
        """Map function over items, reporting the progress.

        Does a `map` operation while displaying a progress bar with
        percentage complete. The map operation may run on arbitrary order
        on the items, and the results may be returned in arbitrary order.

        ::

            def work(i):
                print(i)

            ProgressBar.map(work, range(50))

        Parameters
        ----------
        function : function
            Function to call for each step

        items : sequence
            Sequence where each element is a tuple of arguments to pass to
            *function*.

        multiprocess : bool, int, optional
            If `True`, use the `multiprocessing` module to distribute each task
            to a different processor core.  If a number greater than 1, then use
            that number of cores.

        ipython_widget : bool, optional
            If `True`, the progress bar will display as an IPython
            notebook widget.

        file : :term:`file-like (writeable)`, optional
            The file to write the progress bar to.  Defaults to
            `sys.stdout`.  If ``file`` is not a tty (as determined by
            calling its `isatty` member, if any), the scrollbar will
            be completely silent.

        step : int, optional
            Update the progress bar at least every *step* steps (default: 100).
            If ``multiprocess`` is `True`, this will affect the size
            of the chunks of ``items`` that are submitted as separate tasks
            to the process pool.  A large step size may make the job
            complete faster if ``items`` is very long.

        multiprocessing_start_method : str, optional
            Useful primarily for testing; if in doubt leave it as the default.
            When using multiprocessing, certain anomalies occur when starting
            processes with the "spawn" method (the only option on Windows);
            other anomalies occur with the "fork" method (the default on
            Linux).
        """
        # concurrent.futures import here to avoid import failure when running
        # in pyodide/Emscripten
        from concurrent.futures import ProcessPoolExecutor, as_completed

        results = []

        if file is None:
            file = sys.stdout

        with cls(len(items), ipython_widget=ipython_widget, file=file) as bar:
            if bar._ipython_widget:
                chunksize = step
            else:
                # Update the bar roughly once per displayed character, but at
                # least every `step` items.
                default_step = max(int(float(len(items)) / bar._bar_length), 1)
                chunksize = min(default_step, step)
            if not multiprocess or multiprocess < 1:
                for i, item in enumerate(items):
                    results.append(function(item))
                    if (i % chunksize) == 0:
                        bar.update(i)
            else:
                ctx = multiprocessing.get_context(multiprocessing_start_method)
                kwargs = dict(mp_context=ctx)

                with ProcessPoolExecutor(
                    max_workers=(
                        int(multiprocess) if multiprocess is not True else None
                    ),
                    **kwargs,
                ) as p:
                    for i, f in enumerate(
                        as_completed(p.submit(function, item) for item in items)
                    ):
                        bar.update(i)
                        results.append(f.result())

        return results
|
ProgressBar
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/experiment_service.py
|
{
"start": 10457,
"end": 12792
}
|
class ____(GoogleCloudBaseOperator):
    """
    Use the Vertex AI SDK to list experiment runs in experiment.

    :param project_id: Required. The ID of the Google Cloud project that the service belongs to.
    :param location: Required. The ID of the Google Cloud location that the service belongs to.
    :param experiment_name: Required. The name of the evaluation experiment.
    :param gcp_conn_id: The connection ID to use connecting to Google Cloud.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    template_fields = (
        "location",
        "project_id",
        "impersonation_chain",
        "experiment_name",
    )

    def __init__(
        self,
        *,
        project_id: str,
        location: str,
        experiment_name: str,
        gcp_conn_id: str = "google_cloud_default",
        impersonation_chain: str | Sequence[str] | None = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.project_id = project_id
        self.location = location
        self.experiment_name = experiment_name
        self.gcp_conn_id = gcp_conn_id
        self.impersonation_chain = impersonation_chain

    def execute(self, context: Context) -> list[str]:
        self.hook = ExperimentRunHook(
            gcp_conn_id=self.gcp_conn_id,
            impersonation_chain=self.impersonation_chain,
        )

        try:
            experiment_runs = self.hook.list_experiment_runs(
                project_id=self.project_id, experiment_name=self.experiment_name, location=self.location
            )
        except exceptions.NotFound:
            # Bug fix: AirflowException performs no %-style interpolation, so the
            # previous two-argument form rendered the message as a tuple instead
            # of substituting the experiment name.
            raise AirflowException(f"Experiment {self.experiment_name} not found")
        # Return just the run names; templated fields above make them usable
        # by downstream tasks.
        return [er.name for er in experiment_runs]
|
ListExperimentRunsOperator
|
python
|
pypa__warehouse
|
warehouse/i18n/extensions.py
|
{
"start": 1939,
"end": 3463
}
|
class ____(InternationalizationExtension):
    """
    Replica of InternationalizationExtension which overrides a single
    method _install_callables to inject our own wrappers for gettext
    and ngettext with the _make_newer_gettext and _make_newer_ngettext
    defined above.

    Diff from original method is:

    -            gettext = _make_new_gettext(gettext)
    -            ngettext = _make_new_ngettext(ngettext)
    +            gettext = _make_newer_gettext(gettext)
    +            ngettext = _make_newer_ngettext(ngettext)
    """

    def _install_callables(
        self,
        gettext: t.Callable[[str], str],
        ngettext: t.Callable[[str, str, int], str],
        newstyle: t.Optional[bool] = None,
        pgettext: t.Optional[t.Callable[[str, str], str]] = None,
        npgettext: t.Optional[t.Callable[[str, str, str, int], str]] = None,
    ) -> None:
        # Optionally toggle Jinja's "newstyle" gettext mode on the environment.
        if newstyle is not None:
            self.environment.newstyle_gettext = newstyle  # type: ignore
        if self.environment.newstyle_gettext:  # type: ignore
            # The only divergence from upstream: wrap with the "newer" helpers.
            gettext = _make_newer_gettext(gettext)
            ngettext = _make_newer_ngettext(ngettext)

            if pgettext is not None:
                pgettext = _make_new_pgettext(pgettext)

            if npgettext is not None:
                npgettext = _make_new_npgettext(npgettext)

        # Expose the (possibly wrapped) callables as template globals.
        self.environment.globals.update(
            gettext=gettext, ngettext=ngettext, pgettext=pgettext, npgettext=npgettext
        )
|
FallbackInternationalizationExtension
|
python
|
coleifer__peewee
|
tests/pwiz_integration.py
|
{
"start": 2721,
"end": 3083
}
|
class ____(BaseModel):
user = ForeignKeyField(column_name='user_id', field='username', model=User)
text = TextField(index=True)
data = IntegerField()
misc = IntegerField()
class Meta:
table_name = 'note'
indexes = (
(('user', 'data', 'misc'), False),
(('user', 'text'), True),
)
""".strip()
|
Note
|
python
|
pytorch__pytorch
|
torch/distributions/exp_family.py
|
{
"start": 185,
"end": 2485
}
|
class ____(Distribution):
r"""
ExponentialFamily is the abstract base class for probability distributions belonging to an
exponential family, whose probability mass/density function has the form is defined below
.. math::
p_{F}(x; \theta) = \exp(\langle t(x), \theta\rangle - F(\theta) + k(x))
where :math:`\theta` denotes the natural parameters, :math:`t(x)` denotes the sufficient statistic,
:math:`F(\theta)` is the log normalizer function for a given family and :math:`k(x)` is the carrier
measure.
Note:
This class is an intermediary between the `Distribution` class and distributions which belong
to an exponential family mainly to check the correctness of the `.entropy()` and analytic KL
divergence methods. We use this class to compute the entropy and KL divergence using the AD
framework and Bregman divergences (courtesy of: Frank Nielsen and Richard Nock, Entropies and
Cross-entropies of Exponential Families).
"""
@property
def _natural_params(self) -> tuple[Tensor, ...]:
"""
Abstract method for natural parameters. Returns a tuple of Tensors based
on the distribution
"""
raise NotImplementedError
def _log_normalizer(self, *natural_params):
"""
Abstract method for log normalizer function. Returns a log normalizer based on
the distribution and input
"""
raise NotImplementedError
@property
def _mean_carrier_measure(self) -> float:
"""
Abstract method for expected carrier measure, which is required for computing
entropy.
"""
raise NotImplementedError
def entropy(self):
"""
Method to compute the entropy using Bregman divergence of the log normalizer.
"""
result: Union[Tensor, float] = -self._mean_carrier_measure
nparams = [p.detach().requires_grad_() for p in self._natural_params]
lg_normal = self._log_normalizer(*nparams)
gradients = torch.autograd.grad(lg_normal.sum(), nparams, create_graph=True)
result += lg_normal
for np, g in zip(nparams, gradients):
result -= (np * g).reshape(self._batch_shape + (-1,)).sum(-1)
return result
|
ExponentialFamily
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/array_ops/spacetobatch_op_test.py
|
{
"start": 2954,
"end": 3202
}
|
class ____(object):
@staticmethod
def space_to_batch(*args, **kwargs):
return array_ops.space_to_batch(*args, **kwargs)
@staticmethod
def batch_to_space(*args, **kwargs):
return array_ops.batch_to_space(*args, **kwargs)
|
PythonOpImpl
|
python
|
PyCQA__pyflakes
|
pyflakes/test/test_api.py
|
{
"start": 9375,
"end": 21308
}
|
class ____(TestCase):
"""
Tests for L{check} and L{checkPath} which check a file for flakes.
"""
@contextlib.contextmanager
def makeTempFile(self, content):
"""
Make a temporary file containing C{content} and return a path to it.
"""
fd, name = tempfile.mkstemp()
try:
with os.fdopen(fd, 'wb') as f:
if not hasattr(content, 'decode'):
content = content.encode('ascii')
f.write(content)
yield name
finally:
os.remove(name)
def assertHasErrors(self, path, errorList):
"""
Assert that C{path} causes errors.
@param path: A path to a file to check.
@param errorList: A list of errors expected to be printed to stderr.
"""
err = io.StringIO()
count = withStderrTo(err, checkPath, path)
self.assertEqual(
(count, err.getvalue()), (len(errorList), ''.join(errorList)))
def getErrors(self, path):
"""
Get any warnings or errors reported by pyflakes for the file at C{path}.
@param path: The path to a Python file on disk that pyflakes will check.
@return: C{(count, log)}, where C{count} is the number of warnings or
errors generated, and log is a list of those warnings, presented
as structured data. See L{LoggingReporter} for more details.
"""
log = []
reporter = LoggingReporter(log)
count = checkPath(path, reporter)
return count, log
def test_legacyScript(self):
from pyflakes.scripts import pyflakes as script_pyflakes
self.assertIs(script_pyflakes.checkPath, checkPath)
def test_missingTrailingNewline(self):
"""
Source which doesn't end with a newline shouldn't cause any
exception to be raised nor an error indicator to be returned by
L{check}.
"""
with self.makeTempFile("def foo():\n\tpass\n\t") as fName:
self.assertHasErrors(fName, [])
def test_checkPathNonExisting(self):
"""
L{checkPath} handles non-existing files.
"""
count, errors = self.getErrors('extremo')
self.assertEqual(count, 1)
self.assertEqual(
errors,
[('unexpectedError', 'extremo', 'No such file or directory')])
def test_multilineSyntaxError(self):
"""
Source which includes a syntax error which results in the raised
L{SyntaxError.text} containing multiple lines of source are reported
with only the last line of that source.
"""
source = """\
def foo():
'''
def bar():
pass
def baz():
'''quux'''
"""
# Sanity check - SyntaxError.text should be multiple lines, if it
# isn't, something this test was unprepared for has happened.
def evaluate(source):
exec(source)
try:
evaluate(source)
except SyntaxError as e:
if not PYPY and sys.version_info < (3, 10):
self.assertTrue(e.text.count('\n') > 1)
else:
self.fail()
with self.makeTempFile(source) as sourcePath:
if PYPY:
message = 'end of file (EOF) while scanning triple-quoted string literal'
elif sys.version_info >= (3, 10):
message = 'unterminated triple-quoted string literal (detected at line 8)' # noqa: E501
else:
message = 'invalid syntax'
if PYPY or sys.version_info >= (3, 10):
column = 12
else:
column = 8
self.assertHasErrors(
sourcePath,
["""\
%s:8:%d: %s
'''quux'''
%s^
""" % (sourcePath, column, message, ' ' * (column - 1))])
def test_eofSyntaxError(self):
"""
The error reported for source files which end prematurely causing a
syntax error reflects the cause for the syntax error.
"""
with self.makeTempFile("def foo(") as sourcePath:
if PYPY:
msg = 'parenthesis is never closed'
elif sys.version_info >= (3, 10):
msg = "'(' was never closed"
else:
msg = 'unexpected EOF while parsing'
if PYPY or sys.version_info >= (3, 10):
column = 8
else:
column = 9
spaces = ' ' * (column - 1)
expected = '{}:1:{}: {}\ndef foo(\n{}^\n'.format(
sourcePath, column, msg, spaces
)
self.assertHasErrors(sourcePath, [expected])
def test_eofSyntaxErrorWithTab(self):
"""
The error reported for source files which end prematurely causing a
syntax error reflects the cause for the syntax error.
"""
with self.makeTempFile("if True:\n\tfoo =") as sourcePath:
self.assertHasErrors(
sourcePath,
[f"""\
{sourcePath}:2:7: invalid syntax
\tfoo =
\t ^
"""])
def test_nonDefaultFollowsDefaultSyntaxError(self):
"""
Source which has a non-default argument following a default argument
should include the line number of the syntax error. However these
exceptions do not include an offset.
"""
source = """\
def foo(bar=baz, bax):
pass
"""
with self.makeTempFile(source) as sourcePath:
if sys.version_info >= (3, 12):
msg = 'parameter without a default follows parameter with a default' # noqa: E501
else:
msg = 'non-default argument follows default argument'
if PYPY:
column = 18
elif sys.version_info >= (3, 10):
column = 18
else:
column = 21
last_line = ' ' * (column - 1) + '^\n'
self.assertHasErrors(
sourcePath,
[f"""\
{sourcePath}:1:{column}: {msg}
def foo(bar=baz, bax):
{last_line}"""]
)
def test_nonKeywordAfterKeywordSyntaxError(self):
"""
Source which has a non-keyword argument after a keyword argument should
include the line number of the syntax error. However these exceptions
do not include an offset.
"""
source = """\
foo(bar=baz, bax)
"""
with self.makeTempFile(source) as sourcePath:
last_line = ' ' * 16 + '^\n'
self.assertHasErrors(
sourcePath,
[f"""\
{sourcePath}:1:17: positional argument follows keyword argument
foo(bar=baz, bax)
{last_line}"""])
def test_invalidEscape(self):
"""
The invalid escape syntax raises ValueError in Python 2
"""
# ValueError: invalid \x escape
with self.makeTempFile(r"foo = '\xyz'") as sourcePath:
position_end = 1
if PYPY:
column = 7
elif sys.version_info < (3, 12):
column = 13
else:
column = 7
last_line = '%s^\n' % (' ' * (column - 1))
decoding_error = """\
%s:1:%d: (unicode error) 'unicodeescape' codec can't decode bytes \
in position 0-%d: truncated \\xXX escape
foo = '\\xyz'
%s""" % (sourcePath, column, position_end, last_line)
self.assertHasErrors(
sourcePath, [decoding_error])
@skipIf(sys.platform == 'win32', 'unsupported on Windows')
def test_permissionDenied(self):
"""
If the source file is not readable, this is reported on standard
error.
"""
if os.getuid() == 0:
self.skipTest('root user can access all files regardless of '
'permissions')
with self.makeTempFile('') as sourcePath:
os.chmod(sourcePath, 0)
count, errors = self.getErrors(sourcePath)
self.assertEqual(count, 1)
self.assertEqual(
errors,
[('unexpectedError', sourcePath, "Permission denied")])
def test_pyflakesWarning(self):
"""
If the source file has a pyflakes warning, this is reported as a
'flake'.
"""
with self.makeTempFile("import foo") as sourcePath:
count, errors = self.getErrors(sourcePath)
self.assertEqual(count, 1)
self.assertEqual(
errors, [('flake', str(UnusedImport(sourcePath, Node(1), 'foo')))])
def test_encodedFileUTF8(self):
"""
If source file declares the correct encoding, no error is reported.
"""
SNOWMAN = chr(0x2603)
source = ("""\
# coding: utf-8
x = "%s"
""" % SNOWMAN).encode('utf-8')
with self.makeTempFile(source) as sourcePath:
self.assertHasErrors(sourcePath, [])
def test_CRLFLineEndings(self):
"""
Source files with Windows CR LF line endings are parsed successfully.
"""
with self.makeTempFile("x = 42\r\n") as sourcePath:
self.assertHasErrors(sourcePath, [])
def test_misencodedFileUTF8(self):
"""
If a source file contains bytes which cannot be decoded, this is
reported on stderr.
"""
SNOWMAN = chr(0x2603)
source = ("""\
# coding: ascii
x = "%s"
""" % SNOWMAN).encode('utf-8')
with self.makeTempFile(source) as sourcePath:
self.assertHasErrors(
sourcePath,
[f"{sourcePath}:1:1: 'ascii' codec can't decode byte 0xe2 in position 21: ordinal not in range(128)\n"]) # noqa: E501
def test_misencodedFileUTF16(self):
"""
If a source file contains bytes which cannot be decoded, this is
reported on stderr.
"""
SNOWMAN = chr(0x2603)
source = ("""\
# coding: ascii
x = "%s"
""" % SNOWMAN).encode('utf-16')
with self.makeTempFile(source) as sourcePath:
if sys.version_info < (3, 11, 4):
expected = f"{sourcePath}: problem decoding source\n"
else:
expected = f"{sourcePath}:1: source code string cannot contain null bytes\n" # noqa: E501
self.assertHasErrors(sourcePath, [expected])
def test_checkRecursive(self):
"""
L{checkRecursive} descends into each directory, finding Python files
and reporting problems.
"""
tempdir = tempfile.mkdtemp()
try:
os.mkdir(os.path.join(tempdir, 'foo'))
file1 = os.path.join(tempdir, 'foo', 'bar.py')
with open(file1, 'wb') as fd:
fd.write(b"import baz\n")
file2 = os.path.join(tempdir, 'baz.py')
with open(file2, 'wb') as fd:
fd.write(b"import contraband")
log = []
reporter = LoggingReporter(log)
warnings = checkRecursive([tempdir], reporter)
self.assertEqual(warnings, 2)
self.assertEqual(
sorted(log),
sorted([('flake', str(UnusedImport(file1, Node(1), 'baz'))),
('flake',
str(UnusedImport(file2, Node(1), 'contraband')))]))
finally:
shutil.rmtree(tempdir)
def test_stdinReportsErrors(self):
"""
L{check} reports syntax errors from stdin
"""
source = "max(1 for i in range(10), key=lambda x: x+1)\n"
err = io.StringIO()
count = withStderrTo(err, check, source, "<stdin>")
self.assertEqual(count, 1)
errlines = err.getvalue().split("\n")[:-1]
expected_error = [
"<stdin>:1:5: Generator expression must be parenthesized",
"max(1 for i in range(10), key=lambda x: x+1)",
" ^",
]
self.assertEqual(errlines, expected_error)
|
CheckTests
|
python
|
joerick__pyinstrument
|
pyinstrument/middleware.py
|
{
"start": 862,
"end": 4239
}
|
class ____(MiddlewareMixin): # type: ignore
def process_request(self, request):
profile_dir = getattr(settings, "PYINSTRUMENT_PROFILE_DIR", None)
func_or_path = getattr(settings, "PYINSTRUMENT_SHOW_CALLBACK", None)
if isinstance(func_or_path, str):
show_pyinstrument = import_string(func_or_path)
elif callable(func_or_path):
show_pyinstrument = func_or_path
else:
show_pyinstrument = lambda request: True
if (
show_pyinstrument(request)
and getattr(settings, "PYINSTRUMENT_URL_ARGUMENT", "profile") in request.GET
) or profile_dir:
profiler = Profiler()
profiler.start()
request.profiler = profiler
def process_response(self, request, response):
if hasattr(request, "profiler"):
profile_session = request.profiler.stop()
default_filename_template = "{total_time:.3f}s {path} {timestamp:.0f}.{ext}"
configured_renderer = getattr(settings, "PYINSTRUMENT_PROFILE_DIR_RENDERER", None)
renderer = get_renderer(configured_renderer)
output = renderer.render(profile_session)
profile_dir = getattr(settings, "PYINSTRUMENT_PROFILE_DIR", None)
filename_cb = getattr(settings, "PYINSTRUMENT_FILENAME_CALLBACK", None)
filename_template = getattr(
settings, "PYINSTRUMENT_FILENAME", default_filename_template
)
# Limit the length of the file name (255 characters is the max limit on major current OS, but it is rather
# high and the other parts (see line 36) are to be taken into account; so a hundred will be fine here).
path = request.get_full_path().replace("/", "_")[:100]
# Swap ? for _qs_ on Windows, as it does not support ? in filenames.
if sys.platform in ["win32", "cygwin"]:
path = path.replace("?", "_qs_")
if profile_dir:
if filename_cb and callable(filename_cb):
filename = filename_cb(request, profile_session, renderer)
if not isinstance(filename, str):
raise ValueError("Filename callback return value should be a string")
else:
filename = filename_template.format(
total_time=profile_session.duration,
path=path,
timestamp=time.time(),
ext=renderer.output_file_extension,
)
file_path = os.path.join(profile_dir, filename)
if not os.path.exists(profile_dir):
os.mkdir(profile_dir)
with open(file_path, "w", encoding="utf-8") as f:
f.write(output)
if getattr(settings, "PYINSTRUMENT_URL_ARGUMENT", "profile") in request.GET:
if isinstance(renderer, HTMLRenderer):
return HttpResponse(output) # type: ignore
else:
renderer = HTMLRenderer()
output = renderer.render(profile_session)
return HttpResponse(output) # type: ignore
else:
return response
else:
return response
|
ProfilerMiddleware
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/layers/recurrent.py
|
{
"start": 81366,
"end": 91648
}
|
class ____(RNN):
"""Gated Recurrent Unit - Cho et al. 2014.
There are two variants. The default one is based on 1406.1078v3 and
has reset gate applied to hidden state before matrix multiplication. The
other one is based on original 1406.1078v1 and has the order reversed.
The second variant is compatible with CuDNNGRU (GPU-only) and allows
inference on CPU. Thus it has separate biases for `kernel` and
`recurrent_kernel`. Use `'reset_after'=True` and
`recurrent_activation='sigmoid'`.
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation")..
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
time_major: The shape format of the `inputs` and `outputs` tensors.
If True, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before" (default),
True = "after" (CuDNN compatible).
Call arguments:
inputs: A 3D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked. An individual `True` entry indicates
that the corresponding timestep should be utilized, while a `False`
entry indicates that the corresponding timestep should be ignored.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
reset_after=False,
**kwargs):
implementation = kwargs.pop('implementation', 1)
if implementation == 0:
logging.warning('`implementation=0` has been deprecated, '
'and now defaults to `implementation=1`.'
'Please update your layer call.')
if 'enable_caching_device' in kwargs:
cell_kwargs = {'enable_caching_device':
kwargs.pop('enable_caching_device')}
else:
cell_kwargs = {}
cell = GRUCell(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation,
reset_after=reset_after,
dtype=kwargs.get('dtype'),
trainable=kwargs.get('trainable', True),
**cell_kwargs)
super(GRU, self).__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = [InputSpec(ndim=3)]
def call(self, inputs, mask=None, training=None, initial_state=None):
return super(GRU, self).call(
inputs, mask=mask, training=training, initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def implementation(self):
return self.cell.implementation
@property
def reset_after(self):
return self.cell.reset_after
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout,
'implementation':
self.implementation,
'reset_after':
self.reset_after
}
config.update(_config_for_enable_caching_device(self.cell))
base_config = super(GRU, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config and config['implementation'] == 0:
config['implementation'] = 1
return cls(**config)
|
GRU
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_zip9.py
|
{
"start": 1610,
"end": 3927
}
|
class ____(ColumnMapExpectation):
"""Expect values in this column to be valid zip9 string types.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_zip9": ["90001-0000", "78884-5888", "20010-8795", "10011-9999"],
"invalid_zip9": ["1111-10000", "1234", "99999", "254878291"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_zip9"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_zip9"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_zip9"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidZip9().print_diagnostic_checklist()
|
ExpectColumnValuesToBeValidZip9
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/external.py
|
{
"start": 6889,
"end": 7152
}
|
class ____(graphene.ObjectType):
class Meta:
name = "FeatureFlag"
name = graphene.NonNull(graphene.String)
enabled = graphene.NonNull(graphene.Boolean)
GrapheneDefinitionsSource = graphene.Enum.from_enum(DefinitionsSource)
|
GrapheneFeatureFlag
|
python
|
astropy__astropy
|
astropy/modeling/tests/test_fitters.py
|
{
"start": 7545,
"end": 13511
}
|
class ____:
def test_compound_model_raises_error(self):
"""Test that if an user tries to use a compound model, raises an error"""
MESSAGE = r"Model must be simple, not compound"
with pytest.raises(ValueError, match=MESSAGE):
init_model1 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model2 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model_comp = init_model1 + init_model2
x = np.arange(10)
y = init_model_comp(x, model_set_axis=False)
fitter = LinearLSQFitter()
fitter(init_model_comp, x, y)
def test_chebyshev1D(self):
"""Tests fitting a 1D Chebyshev polynomial to some real world data."""
test_file = get_pkg_data_filename(os.path.join("data", "idcompspec.fits"))
with open(test_file) as f:
lines = f.read()
reclist = lines.split("begin")
record = irafutil.IdentifyRecord(reclist[1])
coeffs = record.coeff
order = int(record.fields["order"])
initial_model = models.Chebyshev1D(order - 1, domain=record.get_range())
fitter = LinearLSQFitter()
fitted_model = fitter(initial_model, record.x, record.z)
assert_allclose(fitted_model.parameters, np.array(coeffs), rtol=10e-2)
def test_linear_fit_model_set(self):
"""Tests fitting multiple models simultaneously."""
init_model = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y_expected = init_model(x, model_set_axis=False)
assert y_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model(x, model_set_axis=False), y_expected, rtol=1e-1)
def test_linear_fit_2d_model_set(self):
"""Tests fitted multiple 2-D models simultaneously."""
init_model = models.Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)
x = np.arange(10)
y = np.arange(10)
z_expected = init_model(x, y, model_set_axis=False)
assert z_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
z = z_expected + np.random.normal(0, 0.01, size=z_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model(x, y, model_set_axis=False), z_expected, rtol=1e-1)
def test_linear_fit_fixed_parameter(self):
"""
Tests fitting a polynomial model with a fixed parameter (issue #6135).
"""
init_model = models.Polynomial1D(degree=2, c1=1)
init_model.c1.fixed = True
x = np.arange(10)
y = 2 + x + 0.5 * x * x
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [2.0, 1.0, 0.5], atol=1e-14)
def test_linear_fit_model_set_fixed_parameter(self):
"""
Tests fitting a polynomial model set with a fixed parameter (#6135).
"""
init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)
init_model.c1.fixed = True
x = np.arange(10)
yy = np.array([2 + x + 0.5 * x * x, -2 * x])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.c0, [2.0, 0.0], atol=1e-14)
assert_allclose(fitted_model.c1, [1.0, -2.0], atol=1e-14)
assert_allclose(fitted_model.c2, [0.5, 0.0], atol=1e-14)
def test_linear_fit_2d_model_set_fixed_parameters(self):
"""
Tests fitting a 2d polynomial model set with fixed parameters (#6135).
"""
init_model = models.Polynomial2D(
degree=2,
c1_0=[1, 2],
c0_1=[-0.5, 1],
n_models=2,
fixed={"c1_0": True, "c0_1": True},
)
x, y = np.mgrid[0:5, 0:5]
zz = np.array([1 + x - 0.5 * y + 0.1 * x * x, 2 * x + y - 0.2 * y * y])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz, atol=1e-14)
def test_linear_fit_model_set_masked_values(self):
"""
Tests model set fitting with masked value(s) (#4824, #6819).
"""
# NB. For single models, there is an equivalent doctest.
init_model = models.Polynomial1D(degree=1, n_models=2)
x = np.arange(10)
y = np.ma.masked_array([2 * x + 1, x - 2], mask=np.zeros_like([x, x]))
y[0, 7] = 100.0 # throw off fit coefficients if unmasked
y.mask[0, 7] = True
y[1, 1:3] = -100.0
y.mask[1, 1:3] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.c0, [1.0, -2.0], atol=1e-14)
assert_allclose(fitted_model.c1, [2.0, 1.0], atol=1e-14)
def test_linear_fit_2d_model_set_masked_values(self):
"""
Tests 2D model set fitting with masked value(s) (#4824, #6819).
"""
init_model = models.Polynomial2D(1, n_models=2)
x, y = np.mgrid[0:5, 0:5]
z = np.ma.masked_array(
[2 * x + 3 * y + 1, x - 0.5 * y - 2], mask=np.zeros_like([x, x])
)
z[0, 3, 1] = -1000.0 # throw off fit coefficients if unmasked
z.mask[0, 3, 1] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model.c0_0, [1.0, -2.0], atol=1e-14)
assert_allclose(fitted_model.c1_0, [2.0, 1.0], atol=1e-14)
assert_allclose(fitted_model.c0_1, [3.0, -0.5], atol=1e-14)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
|
TestLinearLSQFitter
|
python
|
automl__auto-sklearn
|
autosklearn/pipeline/components/data_preprocessing/rescaling/robust_scaler.py
|
{
"start": 731,
"end": 2827
}
|
class ____(Rescaling, AutoSklearnPreprocessingAlgorithm):
    """Rescaling component that wraps :class:`sklearn.preprocessing.RobustScaler`.

    Scales features using statistics that are robust to outliers, based on
    the ``(q_min, q_max)`` quantile range.
    """

    def __init__(
        self,
        q_min: float,
        q_max: float,
        random_state: Optional[Union[int, np.random.RandomState]] = None,
    ) -> None:
        # Imported lazily so module import does not require sklearn.
        from sklearn.preprocessing import RobustScaler

        self.q_min = q_min
        self.q_max = q_max
        # copy=False transforms in place instead of duplicating the data.
        self.preprocessor = RobustScaler(
            quantile_range=(self.q_min, self.q_max),
            copy=False,
        )

    @staticmethod
    def get_properties(
        dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
    ) -> Dict[str, Optional[Union[str, int, bool, Tuple]]]:
        """Describe the capabilities of this component for the pipeline."""
        return {
            "shortname": "RobustScaler",
            "name": "RobustScaler",
            "handles_regression": True,
            "handles_classification": True,
            "handles_multiclass": True,
            "handles_multilabel": True,
            "handles_multioutput": True,
            "is_deterministic": True,
            # TODO find out if this is right!
            "handles_sparse": True,
            "handles_dense": True,
            "input": (SPARSE, DENSE, UNSIGNED_DATA),
            "output": (INPUT, SIGNED_DATA),
            "preferred_dtype": None,
        }

    @staticmethod
    def get_hyperparameter_search_space(
        feat_type: Optional[FEAT_TYPE_TYPE] = None,
        dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
    ) -> ConfigurationSpace:
        """Return the search space over the two quantile boundaries."""
        space = ConfigurationSpace()
        lower = UniformFloatHyperparameter("q_min", 0.001, 0.3, default_value=0.25)
        upper = UniformFloatHyperparameter("q_max", 0.7, 0.999, default_value=0.75)
        space.add_hyperparameters((lower, upper))
        return space

    def fit(
        self, X: PIPELINE_DATA_DTYPE, y: Optional[PIPELINE_DATA_DTYPE] = None
    ) -> "AutoSklearnPreprocessingAlgorithm":
        """Fit the scaler; centering is disabled for sparse input."""
        if self.preprocessor is None:
            raise NotFittedError()
        # Centering a sparse matrix would densify it, so turn it off.
        if sparse.isspmatrix(X):
            self.preprocessor.set_params(with_centering=False)
        return super(RobustScalerComponent, self).fit(X, y)
|
RobustScalerComponent
|
python
|
cython__cython
|
Cython/Compiler/UtilNodes.py
|
{
"start": 9733,
"end": 10935
}
|
class ____(Nodes.StatNode, LetNodeMixin):
    # Implements a local temporary variable scope. Imagine this
    # syntax being present:
    # let temp = VALUE:
    # BLOCK (can modify temp)
    # if temp is an object, decref
    #
    # Usually used after analysis phase, but forwards analysis methods
    # to its children
    child_attrs = ['temp_expression', 'body']
    def __init__(self, lazy_temp, body):
        self.set_temp_expr(lazy_temp)
        # There is no source-level construct for this node, so borrow the
        # position information from the wrapped body.
        self.pos = body.pos
        self.body = body
    def analyse_declarations(self, env):
        # Forward declaration analysis to the temp expression and the body.
        self.temp_expression.analyse_declarations(env)
        self.body.analyse_declarations(env)
    def analyse_expressions(self, env):
        # Children may be replaced during analysis, so rebind them.
        self.temp_expression = self.temp_expression.analyse_expressions(env)
        self.body = self.body.analyse_expressions(env)
        return self
    def generate_execution_code(self, code):
        # Evaluate the temp before the body runs and release it afterwards
        # (setup/teardown helpers are presumably provided by LetNodeMixin).
        self.setup_temp_expr(code)
        self.body.generate_execution_code(code)
        self.teardown_temp_expr(code)
    def generate_function_definitions(self, env, code):
        self.temp_expression.generate_function_definitions(env, code)
        self.body.generate_function_definitions(env, code)
LetNode
|
python
|
pypa__setuptools
|
setuptools/config/setupcfg.py
|
{
"start": 26588,
"end": 26695
}
|
class ____(SetuptoolsDeprecationWarning):
    """Deprecation warning category for ``setup.cfg`` declarative config."""
    # Docs anchor shown to users — presumably appended to the warning
    # message by the base class; confirm how _SEE_DOCS is consumed.
    _SEE_DOCS = "userguide/declarative_config.html"
|
_DeprecatedConfig
|
python
|
pytorch__pytorch
|
torch/_inductor/runtime/caching/exceptions.py
|
{
"start": 3519,
"end": 3814
}
|
class ____(UserError):
    """Signals a failure while encoding a cache value.

    Serves as the base class for errors raised when a cache value cannot
    be encoded for storage or transmission — e.g. serialization,
    compression, or other encoding-related failures.
    """
|
ValueEncodingError
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/schema.py
|
{
"start": 205645,
"end": 229594
}
|
class ____(HasSchemaAttr):
    """A collection of :class:`_schema.Table`
    objects and their associated schema
    constructs.
    Holds a collection of :class:`_schema.Table` objects as well as
    an optional binding to an :class:`_engine.Engine` or
    :class:`_engine.Connection`. If bound, the :class:`_schema.Table` objects
    in the collection and their columns may participate in implicit SQL
    execution.
    The :class:`_schema.Table` objects themselves are stored in the
    :attr:`_schema.MetaData.tables` dictionary.
    :class:`_schema.MetaData` is a thread-safe object for read operations.
    Construction of new tables within a single :class:`_schema.MetaData`
    object,
    either explicitly or via reflection, may not be completely thread-safe.
    .. seealso::
        :ref:`metadata_describing` - Introduction to database metadata
    """
    # Dispatch key for SQLAlchemy's visitor traversal machinery.
    __visit_name__ = "metadata"
    def __init__(
        self,
        schema: Optional[str] = None,
        quote_schema: Optional[bool] = None,
        naming_convention: Optional[_NamingSchemaParameter] = None,
        info: Optional[_InfoType] = None,
    ) -> None:
        """Create a new MetaData object.
        :param schema:
           The default schema to use for the :class:`_schema.Table`,
           :class:`.Sequence`, and potentially other objects associated with
           this :class:`_schema.MetaData`. Defaults to ``None``.
           .. seealso::
                :ref:`schema_metadata_schema_name` - details on how the
                :paramref:`_schema.MetaData.schema` parameter is used.
                :paramref:`_schema.Table.schema`
                :paramref:`.Sequence.schema`
        :param quote_schema:
            Sets the ``quote_schema`` flag for those :class:`_schema.Table`,
            :class:`.Sequence`, and other objects which make usage of the
            local ``schema`` name.
        :param info: Optional data dictionary which will be populated into the
            :attr:`.SchemaItem.info` attribute of this object.
        :param naming_convention: a dictionary referring to values which
          will establish default naming conventions for :class:`.Constraint`
          and :class:`.Index` objects, for those objects which are not given
          a name explicitly.
          The keys of this dictionary may be:
          * a constraint or Index class, e.g. the :class:`.UniqueConstraint`,
            :class:`_schema.ForeignKeyConstraint` class, the :class:`.Index`
            class
          * a string mnemonic for one of the known constraint classes;
            ``"fk"``, ``"pk"``, ``"ix"``, ``"ck"``, ``"uq"`` for foreign key,
            primary key, index, check, and unique constraint, respectively.
          * the string name of a user-defined "token" that can be used
            to define new naming tokens.
          The values associated with each "constraint class" or "constraint
          mnemonic" key are string naming templates, such as
          ``"uq_%(table_name)s_%(column_0_name)s"``,
          which describe how the name should be composed.  The values
          associated with user-defined "token" keys should be callables of the
          form ``fn(constraint, table)``, which accepts the constraint/index
          object and :class:`_schema.Table` as arguments, returning a string
          result.
          The built-in names are as follows, some of which may only be
          available for certain types of constraint:
            * ``%(table_name)s`` - the name of the :class:`_schema.Table`
              object
              associated with the constraint.
            * ``%(referred_table_name)s`` - the name of the
              :class:`_schema.Table`
              object associated with the referencing target of a
              :class:`_schema.ForeignKeyConstraint`.
            * ``%(column_0_name)s`` - the name of the :class:`_schema.Column`
              at
              index position "0" within the constraint.
            * ``%(column_0N_name)s`` - the name of all :class:`_schema.Column`
              objects in order within the constraint, joined without a
              separator.
            * ``%(column_0_N_name)s`` - the name of all
              :class:`_schema.Column`
              objects in order within the constraint, joined with an
              underscore as a separator.
            * ``%(column_0_label)s``, ``%(column_0N_label)s``,
              ``%(column_0_N_label)s`` - the label of either the zeroth
              :class:`_schema.Column` or all :class:`.Columns`, separated with
              or without an underscore
            * ``%(column_0_key)s``, ``%(column_0N_key)s``,
              ``%(column_0_N_key)s`` - the key of either the zeroth
              :class:`_schema.Column` or all :class:`.Columns`, separated with
              or without an underscore
            * ``%(referred_column_0_name)s``, ``%(referred_column_0N_name)s``
              ``%(referred_column_0_N_name)s``,  ``%(referred_column_0_key)s``,
              ``%(referred_column_0N_key)s``, ...  column tokens which
              render the names/keys/labels of columns that are referenced
              by a  :class:`_schema.ForeignKeyConstraint`.
            * ``%(constraint_name)s`` - a special key that refers to the
              existing name given to the constraint.  When this key is
              present, the :class:`.Constraint` object's existing name will be
              replaced with one that is composed from template string that
              uses this token. When this token is present, it is required that
              the :class:`.Constraint` is given an explicit name ahead of time.
            * user-defined: any additional token may be implemented by passing
              it along with a ``fn(constraint, table)`` callable to the
              naming_convention dictionary.
          .. seealso::
                :ref:`constraint_naming_conventions` - for detailed usage
                examples.
        """
        if schema is not None and not isinstance(schema, str):
            raise exc.ArgumentError(
                "expected schema argument to be a string, "
                f"got {type(schema)}."
            )
        self.tables = util.FacadeDict()
        self.schema = quoted_name.construct(schema, quote_schema)
        self.naming_convention = (
            naming_convention
            if naming_convention
            else DEFAULT_NAMING_CONVENTION
        )
        if info:
            self.info = info
        # Explicit (non-default) schema names seen among the tables here;
        # kept in sync by _add_table / _remove_table.
        self._schemas: Set[str] = set()
        self._sequences: Dict[str, Sequence] = {}
        # ForeignKey bookkeeping keyed by a (str, Optional[str]) pair —
        # presumably (table name, schema); confirm against _get_table_key.
        self._fk_memos: Dict[Tuple[str, Optional[str]], List[ForeignKey]] = (
            collections.defaultdict(list)
        )
        self._objects: Set[Union[HasSchemaAttr, SchemaType]] = set()
    tables: util.FacadeDict[str, Table]
    """A dictionary of :class:`_schema.Table`
    objects keyed to their name or "table key".
    The exact key is that determined by the :attr:`_schema.Table.key`
    attribute;
    for a table with no :attr:`_schema.Table.schema` attribute,
    this is the same
    as :attr:`_schema.Table.name`.  For a table with a schema,
    it is typically of the
    form ``schemaname.tablename``.
    .. seealso::
        :attr:`_schema.MetaData.sorted_tables`
    """
    def __repr__(self) -> str:
        return "MetaData()"
    def __contains__(self, table_or_key: Union[str, Table]) -> bool:
        # Accept either a Table (use its .key) or a key string directly.
        if not isinstance(table_or_key, str):
            table_or_key = table_or_key.key
        return table_or_key in self.tables
    def _add_table(
        self, name: str, schema: Optional[str], table: Table
    ) -> None:
        """Register *table* under its computed key and track its schema."""
        key = _get_table_key(name, schema)
        self.tables._insert_item(key, table)
        if schema:
            self._schemas.add(schema)
    def _remove_table(self, name: str, schema: Optional[str]) -> None:
        """Drop the table for (name, schema) and detach its foreign keys."""
        key = _get_table_key(name, schema)
        removed = dict.pop(self.tables, key, None)
        if removed is not None:
            for fk in removed.foreign_keys:
                fk._remove_from_metadata(self)
        if self._schemas:
            # Rebuild from scratch: the removed table may have been the
            # last one in its schema.
            self._schemas = {
                t.schema for t in self.tables.values() if t.schema is not None
            }
    def __getstate__(self) -> Dict[str, Any]:
        return {
            "tables": self.tables,
            "schema": self.schema,
            "schemas": self._schemas,
            "sequences": self._sequences,
            "fk_memos": self._fk_memos,
            "naming_convention": self.naming_convention,
            "objects": self._objects,
        }
    def __setstate__(self, state: Dict[str, Any]) -> None:
        self.tables = state["tables"]
        self.schema = state["schema"]
        self.naming_convention = state["naming_convention"]
        self._sequences = state["sequences"]
        self._schemas = state["schemas"]
        self._fk_memos = state["fk_memos"]
        # .get() with a default — presumably tolerates pickles created
        # before the "objects" entry existed.
        self._objects = state.get("objects", set())
    def clear(self) -> None:
        """Clear all objects from this MetaData."""
        dict.clear(self.tables)
        self._schemas.clear()
        self._fk_memos.clear()
        self._sequences.clear()
        self._objects.clear()
    def remove(self, table: Table) -> None:
        """Remove the given Table object from this MetaData."""
        self._remove_table(table.name, table.schema)
    @property
    def sorted_tables(self) -> List[Table]:
        """Returns a list of :class:`_schema.Table` objects sorted in order of
        foreign key dependency.
        The sorting will place :class:`_schema.Table`
        objects that have dependencies
        first, before the dependencies themselves, representing the
        order in which they can be created.   To get the order in which
        the tables would be dropped, use the ``reversed()`` Python built-in.
        .. warning::
            The :attr:`.MetaData.sorted_tables` attribute cannot by itself
            accommodate automatic resolution of dependency cycles between
            tables, which are usually caused by mutually dependent foreign key
            constraints. When these cycles are detected, the foreign keys
            of these tables are omitted from consideration in the sort.
            A warning is emitted when this condition occurs, which will be an
            exception raise in a future release.   Tables which are not part
            of the cycle will still be returned in dependency order.
            To resolve these cycles, the
            :paramref:`_schema.ForeignKeyConstraint.use_alter` parameter may be
            applied to those constraints which create a cycle.  Alternatively,
            the :func:`_schema.sort_tables_and_constraints` function will
            automatically return foreign key constraints in a separate
            collection when cycles are detected so that they may be applied
            to a schema separately.
        .. seealso::
            :func:`_schema.sort_tables`
            :func:`_schema.sort_tables_and_constraints`
            :attr:`_schema.MetaData.tables`
            :meth:`_reflection.Inspector.get_table_names`
            :meth:`_reflection.Inspector.get_sorted_table_and_fkc_names`
        """
        return ddl.sort_tables(
            sorted(self.tables.values(), key=lambda t: t.key) # type: ignore
        )
    # overloads needed to work around a mypy limitation
    # https://github.com/python/mypy/issues/17093
    @overload
    def reflect(
        self,
        bind: Engine,
        schema: Optional[str] = ...,
        views: bool = ...,
        only: Union[
            _typing_Sequence[str], Callable[[str, MetaData], bool], None
        ] = ...,
        extend_existing: bool = ...,
        autoload_replace: bool = ...,
        resolve_fks: bool = ...,
        **dialect_kwargs: Any,
    ) -> None: ...
    @overload
    def reflect(
        self,
        bind: Connection,
        schema: Optional[str] = ...,
        views: bool = ...,
        only: Union[
            _typing_Sequence[str], Callable[[str, MetaData], bool], None
        ] = ...,
        extend_existing: bool = ...,
        autoload_replace: bool = ...,
        resolve_fks: bool = ...,
        **dialect_kwargs: Any,
    ) -> None: ...
    @util.preload_module("sqlalchemy.engine.reflection")
    def reflect(
        self,
        bind: Union[Engine, Connection],
        schema: Optional[str] = None,
        views: bool = False,
        only: Union[
            _typing_Sequence[str], Callable[[str, MetaData], bool], None
        ] = None,
        extend_existing: bool = False,
        autoload_replace: bool = True,
        resolve_fks: bool = True,
        **dialect_kwargs: Any,
    ) -> None:
        r"""Load all available table definitions from the database.
        Automatically creates ``Table`` entries in this ``MetaData`` for any
        table available in the database but not yet present in the
        ``MetaData``.  May be called multiple times to pick up tables recently
        added to the database, however no special action is taken if a table
        in this ``MetaData`` no longer exists in the database.
        :param bind:
          A :class:`.Connection` or :class:`.Engine` used to access the
          database.
        :param schema:
          Optional, query and reflect tables from an alternate schema.
          If None, the schema associated with this :class:`_schema.MetaData`
          is used, if any.
        :param views:
          If True, also reflect views (materialized and plain).
        :param only:
          Optional.  Load only a sub-set of available named tables.  May be
          specified as a sequence of names or a callable.
          If a sequence of names is provided, only those tables will be
          reflected.  An error is raised if a table is requested but not
          available.  Named tables already present in this ``MetaData`` are
          ignored.
          If a callable is provided, it will be used as a boolean predicate to
          filter the list of potential table names.  The callable is called
          with a table name and this ``MetaData`` instance as positional
          arguments and should return a true value for any table to reflect.
        :param extend_existing: Passed along to each :class:`_schema.Table` as
          :paramref:`_schema.Table.extend_existing`.
        :param autoload_replace: Passed along to each :class:`_schema.Table`
          as
          :paramref:`_schema.Table.autoload_replace`.
        :param resolve_fks: if True, reflect :class:`_schema.Table`
         objects linked
         to :class:`_schema.ForeignKey` objects located in each
         :class:`_schema.Table`.
         For :meth:`_schema.MetaData.reflect`,
         this has the effect of reflecting
         related tables that might otherwise not be in the list of tables
         being reflected, for example if the referenced table is in a
         different schema or is omitted via the
         :paramref:`.MetaData.reflect.only` parameter.  When False,
         :class:`_schema.ForeignKey` objects are not followed to the
         :class:`_schema.Table`
         in which they link, however if the related table is also part of the
         list of tables that would be reflected in any case, the
         :class:`_schema.ForeignKey` object will still resolve to its related
         :class:`_schema.Table` after the :meth:`_schema.MetaData.reflect`
         operation is
         complete.  Defaults to True.
         .. seealso::
            :paramref:`_schema.Table.resolve_fks`
        :param \**dialect_kwargs: Additional keyword arguments not mentioned
         above are dialect specific, and passed in the form
         ``<dialectname>_<argname>``.  See the documentation regarding an
         individual dialect at :ref:`dialect_toplevel` for detail on
         documented arguments.
        .. seealso::
            :ref:`metadata_reflection_toplevel`
            :meth:`_events.DDLEvents.column_reflect` - Event used to customize
            the reflected columns. Usually used to generalize the types using
            :meth:`_types.TypeEngine.as_generic`
            :ref:`metadata_reflection_dbagnostic_types` - describes how to
            reflect tables using general types.
        """
        with inspection.inspect(bind)._inspection_context() as insp:
            reflect_opts: Any = {
                "autoload_with": insp,
                "extend_existing": extend_existing,
                "autoload_replace": autoload_replace,
                "resolve_fks": resolve_fks,
                "_extend_on": set(),
            }
            reflect_opts.update(dialect_kwargs)
            if schema is None:
                schema = self.schema
            if schema is not None:
                reflect_opts["schema"] = schema
            kind = util.preloaded.engine_reflection.ObjectKind.TABLE
            available: util.OrderedSet[str] = util.OrderedSet(
                insp.get_table_names(schema, **dialect_kwargs)
            )
            if views:
                kind = util.preloaded.engine_reflection.ObjectKind.ANY
                available.update(insp.get_view_names(schema, **dialect_kwargs))
                # Not every dialect implements materialized views.
                try:
                    available.update(
                        insp.get_materialized_view_names(
                            schema, **dialect_kwargs
                        )
                    )
                except NotImplementedError:
                    pass
            if schema is not None:
                available_w_schema: util.OrderedSet[str] = util.OrderedSet(
                    [f"{schema}.{name}" for name in available]
                )
            else:
                available_w_schema = available
            current = set(self.tables)
            if only is None:
                load = [
                    name
                    for name, schname in zip(available, available_w_schema)
                    if extend_existing or schname not in current
                ]
            elif callable(only):
                load = [
                    name
                    for name, schname in zip(available, available_w_schema)
                    if (extend_existing or schname not in current)
                    and only(name, self)
                ]
            else:
                missing = [name for name in only if name not in available]
                if missing:
                    s = schema and (" schema '%s'" % schema) or ""
                    missing_str = ", ".join(missing)
                    raise exc.InvalidRequestError(
                        f"Could not reflect: requested table(s) not available "
                        f"in {bind.engine!r}{s}: ({missing_str})"
                    )
                load = [
                    name
                    for name in only
                    if extend_existing or name not in current
                ]
            # pass the available tables so the inspector can
            # choose to ignore the filter_names
            _reflect_info = insp._get_reflection_info(
                schema=schema,
                filter_names=load,
                available=available,
                kind=kind,
                scope=util.preloaded.engine_reflection.ObjectScope.ANY,
                **dialect_kwargs,
            )
            reflect_opts["_reflect_info"] = _reflect_info
            for name in load:
                try:
                    Table(name, self, **reflect_opts)
                except exc.UnreflectableTableError as uerr:
                    util.warn(f"Skipping table {name}: {uerr}")
    def create_all(
        self,
        bind: _CreateDropBind,
        tables: Optional[_typing_Sequence[Table]] = None,
        checkfirst: Union[bool, CheckFirst] = CheckFirst.ALL,
    ) -> None:
        """Create all tables stored in this metadata.
        Conditional by default, will not attempt to recreate tables already
        present in the target database.
        :param bind:
          A :class:`.Connection` or :class:`.Engine` used to access the
          database.
        :param tables:
          Optional list of ``Table`` objects, which is a subset of the total
          tables in the ``MetaData`` (others are ignored).
        :param checkfirst: A boolean value or instance of :class:`.CheckFirst`.
         Indicates which objects should be checked for within a separate pass
         before creating schema objects.
        """
        bind._run_ddl_visitor(
            ddl.SchemaGenerator, self, checkfirst=checkfirst, tables=tables
        )
    def drop_all(
        self,
        bind: _CreateDropBind,
        tables: Optional[_typing_Sequence[Table]] = None,
        checkfirst: Union[bool, CheckFirst] = CheckFirst.ALL,
    ) -> None:
        """Drop all tables stored in this metadata.
        Conditional by default, will not attempt to drop tables not present in
        the target database.
        :param bind:
          A :class:`.Connection` or :class:`.Engine` used to access the
          database.
        :param tables:
          Optional list of ``Table`` objects, which is a subset of the
          total tables in the ``MetaData`` (others are ignored).
        :param checkfirst: A boolean value or instance of :class:`.CheckFirst`.
         Indicates which objects should be checked for within a separate pass
         before dropping schema objects.
        """
        bind._run_ddl_visitor(
            ddl.SchemaDropper, self, checkfirst=checkfirst, tables=tables
        )
    @property
    def schemas(self) -> _typing_Sequence[str]:
        """A sequence of schema names that are present in this MetaData."""
        schemas = self._schemas
        if self.schema:
            schemas = schemas | {self.schema}
        return tuple(schemas)
    def get_schema_objects(
        self,
        kind: Type[_T],
        *,
        schema: Union[str, None, Literal[_NoArg.NO_ARG]] = _NoArg.NO_ARG,
    ) -> _typing_Sequence[_T]:
        """Return a sequence of schema objects of the given kind.
        This method can be used to return :class:`_sqltypes.Enum`,
        :class:`.Sequence`, etc. objects registered in this
        :class:`_schema.MetaData`.
        :param kind: a type that indicates what object to return, such as
         :class:`Enum` or :class:`Sequence`.
        :param schema: Optional, a schema name to filter the objects by. If
         not provided the default schema of the metadata is used.
        """
        if schema is _NoArg.NO_ARG:
            schema = self.schema
        return tuple(
            obj
            for obj in self._objects
            if isinstance(obj, kind) and obj.schema == schema
        )
    def get_schema_object_by_name(
        self,
        kind: Type[_T],
        name: str,
        *,
        schema: Union[str, None, Literal[_NoArg.NO_ARG]] = _NoArg.NO_ARG,
    ) -> Optional[_T]:
        """Return a schema objects of the given kind and name if found.
        This method can be used to return :class:`_sqltypes.Enum`,
        :class:`.Sequence`, etc. objects registered in this
        :class:`_schema.MetaData`.
        :param kind: a type that indicates what object to return, such as
         :class:`Enum` or :class:`Sequence`.
        :param name: the name of the object to return.
        :param schema: Optional, a schema name to filter the objects by. If
         not provided the default schema of the metadata is used.
        """
        for obj in self.get_schema_objects(kind, schema=schema):
            if getattr(obj, "name", None) == name:
                return obj
        return None
    def _register_object(self, obj: Union[HasSchemaAttr, SchemaType]) -> None:
        """Track a schema object (e.g. Sequence, Enum) owned by this MetaData."""
        self._objects.add(obj)
|
MetaData
|
python
|
kamyu104__LeetCode-Solutions
|
Python/largest-multiple-of-three.py
|
{
"start": 895,
"end": 1800
}
|
class ____(object):
    # NOTE: Python 2 code (xrange, dict.iteritems).
    def largestMultipleOfThree(self, digits):
        """
        :type digits: List[int]
        :rtype: str
        """
        # Greedy approach: keep all digits, then delete the smallest set of
        # digits whose sum is congruent (mod 3) to the total sum's remainder.
        # Removing at most two digits always suffices.
        def candidates_gen(r):
            # Yield candidate deletion multisets in increasing "cost" order:
            # first every single digit, then every pair (i >= j). Caller
            # filters by remainder match and digit availability.
            if r == 0:
                return
            for i in xrange(10):
                yield [i]
            for i in xrange(10):
                for j in xrange(i+1):
                    yield [i, j]
        count, r = collections.Counter(digits), sum(digits)%3
        for deletes in candidates_gen(r):
            delete_count = collections.Counter(deletes)
            if sum(deletes)%3 == r and \
               all(count[k] >= v for k, v in delete_count.iteritems()):
                for k, v in delete_count.iteritems():
                    count[k] -= v
                break
        # Emit remaining digits from 9 down to 0 for the largest number.
        result = "".join(str(d)*count[d] for d in reversed(xrange(10)))
        return "0" if result and result[0] == '0' else result
|
Solution2
|
python
|
lazyprogrammer__machine_learning_examples
|
supervised_class/bayes.py
|
{
"start": 610,
"end": 2276
}
|
class ____(object):
    """Gaussian Bayes classifier: one full-covariance Gaussian per class."""

    def fit(self, X, Y, smoothing=1e-2):
        """Estimate a mean, covariance, and prior for every class label."""
        N, D = X.shape
        self.gaussians = dict()
        self.priors = dict()
        for label in set(Y):
            Xc = X[Y == label]
            self.gaussians[label] = {
                'mean': Xc.mean(axis=0),
                # A small ridge on the diagonal keeps the covariance
                # matrix invertible.
                'cov': np.cov(Xc.T) + np.eye(D)*smoothing,
            }
            self.priors[label] = float(len(Y[Y == label])) / len(Y)

    def score(self, X, Y):
        """Mean accuracy of predictions over (X, Y)."""
        return np.mean(self.predict(X) == Y)

    def predict(self, X):
        """Return the MAP class label for each row of X."""
        N, D = X.shape
        n_classes = len(self.gaussians)
        log_posterior = np.zeros((N, n_classes))
        for label, g in iteritems(self.gaussians):
            log_lik = mvn.logpdf(X, mean=g['mean'], cov=g['cov'])
            log_posterior[:, label] = log_lik + np.log(self.priors[label])
        return np.argmax(log_posterior, axis=1)
if __name__ == '__main__':
    # get_data is presumably a course helper imported elsewhere in this
    # file; 10000 samples are requested.
    X, Y = get_data(10000)
    # 50/50 split without shuffling here — assumes get_data already
    # returns shuffled data; TODO confirm.
    Ntrain = len(Y) // 2
    Xtrain, Ytrain = X[:Ntrain], Y[:Ntrain]
    Xtest, Ytest = X[Ntrain:], Y[Ntrain:]
    model = Bayes()
    t0 = datetime.now()
    model.fit(Xtrain, Ytrain)
    print("Training time:", (datetime.now() - t0))
    t0 = datetime.now()
    print("Train accuracy:", model.score(Xtrain, Ytrain))
    print("Time to compute train accuracy:", (datetime.now() - t0), "Train size:", len(Ytrain))
    t0 = datetime.now()
    print("Test accuracy:", model.score(Xtest, Ytest))
    print("Time to compute test accuracy:", (datetime.now() - t0), "Test size:", len(Ytest))
    # plot the mean of each class
    # (each mean is a 784-vector rendered as a 28x28 image — MNIST-style data)
    for c, g in iteritems(model.gaussians):
        plt.imshow(g['mean'].reshape(28, 28))
        plt.title(c)
        plt.show()
|
Bayes
|
python
|
spyder-ide__spyder
|
spyder/api/shellconnect/main_widget.py
|
{
"start": 1392,
"end": 7842
}
|
class ____(PluginMainWidget):
    """
    Main widget to use in a plugin that shows console-specific content.
    Notes
    -----
    * This is composed of a QStackedWidget to stack widgets associated to each
    shell widget in the console and only show one of them at a time.
    * The current widget in the stack will display the content associated to
    the console with focus.
    """
    def __init__(self, *args, set_layout=True, **kwargs):
        super().__init__(*args, **kwargs)
        # Widgets
        # NOTE(review): the stack is only created when the empty-message
        # feature is disabled, yet several methods below use self._stack
        # unconditionally — presumably the enabled case is handled by the
        # base class; confirm.
        if not (
            self.SHOW_MESSAGE_WHEN_EMPTY
            and self.get_conf(
                "show_message_when_panes_are_empty", section="main"
            )
        ):
            self._stack = QStackedWidget(self)
            if set_layout:
                layout = QVBoxLayout()
                layout.addWidget(self._stack)
                self.setLayout(layout)
        # Maps id(shellwidget) -> widget displayed for that shell.
        self._shellwidgets = {}
    # ---- PluginMainWidget API
    # ------------------------------------------------------------------------
    def current_widget(self):
        """
        Return the current widget in the stack.
        Returns
        -------
        QWidget
            The current widget.
        """
        # NOTE(review): _content_widget is not assigned in this class —
        # presumably set by set_content_widget in a base class; confirm.
        return self._content_widget
    def get_focus_widget(self):
        return self._stack.currentWidget()
    # ---- SpyderWidgetMixin API
    # ------------------------------------------------------------------------
    def update_style(self):
        self._stack.setStyleSheet("QStackedWidget {padding: 0px; border: 0px}")
    # ---- Stack accesors
    # ------------------------------------------------------------------------
    def count(self):
        """
        Return the number of widgets in the stack.
        Returns
        -------
        int
            The number of widgets in the stack.
        """
        return self._stack.count()
    def get_widget_for_shellwidget(self, shellwidget):
        """return widget corresponding to shellwidget."""
        shellwidget_id = id(shellwidget)
        if shellwidget_id in self._shellwidgets:
            return self._shellwidgets[shellwidget_id]
        return None
    # ---- Public API
    # ------------------------------------------------------------------------
    def add_shellwidget(self, shellwidget):
        """Create a new widget in the stack and associate it to shellwidget."""
        shellwidget_id = id(shellwidget)
        if shellwidget_id not in self._shellwidgets:
            widget = self.create_new_widget(shellwidget)
            self._stack.addWidget(widget)
            self._shellwidgets[shellwidget_id] = widget
            # Add all actions to new widget for shortcuts to work.
            for __, action in self.get_actions().items():
                if action:
                    widget_actions = widget.actions()
                    if action not in widget_actions:
                        widget.addAction(action)
            self.set_shellwidget(shellwidget)
    def remove_shellwidget(self, shellwidget):
        """Remove widget associated to shellwidget."""
        shellwidget_id = id(shellwidget)
        if shellwidget_id in self._shellwidgets:
            widget = self._shellwidgets.pop(shellwidget_id)
            # If `widget` is an empty pane, we don't need to remove it from the
            # stack (because it's the one we need to show since the console is
            # showing an error) nor try to close it (because it makes no
            # sense).
            if not isinstance(widget, EmptyMessageWidget):
                self._stack.removeWidget(widget)
                self.close_widget(widget)
            self.update_actions()
    def set_shellwidget(self, shellwidget):
        """Set widget associated with shellwidget as the current widget."""
        old_widget = self.current_widget()
        widget = self.get_widget_for_shellwidget(shellwidget)
        if widget is None:
            return
        # Show either the empty message or the content, depending on
        # configuration and whether the widget has anything to display.
        self.set_content_widget(widget, add_to_stack=False)
        if (
            self.SHOW_MESSAGE_WHEN_EMPTY
            and self.get_conf(
                "show_message_when_panes_are_empty", section="main"
            )
            and widget.is_empty
        ):
            self.show_empty_message()
        else:
            self.show_content_widget()
        self.switch_widget(widget, old_widget)
        self.update_actions()
    def add_errored_shellwidget(self, shellwidget):
        """
        Create a new _ErroredMessageWidget in the stack and associate it to
        shellwidget.
        This is necessary to show a meaningful message when switching to
        consoles with dead kernels.
        """
        shellwidget_id = id(shellwidget)
        # This can happen if the kernel started without issues but something is
        # printed to its stderr stream, which we display as an error in the
        # console. In that case, we need to remove the current widget
        # associated to shellwidget and replace it by an empty one.
        if shellwidget_id in self._shellwidgets:
            self._shellwidgets.pop(shellwidget_id)
        widget = _ErroredMessageWidget(self, shellwidget)
        widget.set_visibility(self.is_visible)
        if self.dockwidget is not None:
            self.dockwidget.visibilityChanged.connect(widget.set_visibility)
        self.set_content_widget(widget)
        self._shellwidgets[shellwidget_id] = widget
        self.set_shellwidget(shellwidget)
    def create_new_widget(self, shellwidget):
        """Create a widget to communicate with shellwidget."""
        # Abstract: subclasses must implement.
        raise NotImplementedError
    def close_widget(self, widget):
        """Close the widget."""
        # Abstract: subclasses must implement.
        raise NotImplementedError
    def switch_widget(self, widget, old_widget):
        """Switch the current widget."""
        # Abstract: subclasses must implement.
        raise NotImplementedError
    def refresh(self):
        """Refresh widgets."""
        if self.count():
            widget = self.current_widget()
            widget.refresh()
    def is_current_widget_error_message(self):
        """Check if the current widget is showing an error message."""
        return isinstance(self.current_widget(), _ErroredMessageWidget)
    def switch_empty_message(self, value: bool):
        """Switch between the empty message widget or the one with content."""
        if value:
            self.show_empty_message()
        else:
            self.show_content_widget()
|
ShellConnectMainWidget
|
python
|
pypa__twine
|
twine/sdist.py
|
{
"start": 2069,
"end": 2918
}
|
class ____(SDist):
    """Source distribution packaged as a zip archive."""

    def read(self) -> bytes:
        """Return the raw ``PKG-INFO`` metadata bytes from the archive."""
        with zipfile.ZipFile(self.filename) as archive:
            # A valid sdist has exactly one top-level directory...
            top = os.path.commonpath(archive.namelist())
            if top in {".", "/", ""}:
                raise exceptions.InvalidDistribution(
                    f"Too many top-level members in sdist archive: {self.filename}"
                )
            # ...which must contain the package metadata in a PKG-INFO file.
            with suppress(KeyError):
                metadata = archive.read(top.rstrip("/") + "/PKG-INFO")
                if b"Metadata-Version" in metadata:
                    return metadata
            raise exceptions.InvalidDistribution(
                "No PKG-INFO in archive or "
                f"PKG-INFO missing 'Metadata-Version': {self.filename}"
            )
|
ZipSDist
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/async_job_manager.py
|
{
"start": 359,
"end": 2635
}
|
class ____:
    """Centralizes throttle and concurrency accounting for async jobs.

    Jobs call ``try_consume()`` before starting and must call ``release()``
    exactly once when they reach a terminal state
    (success/skip/fail/timeout).
    """

    def __init__(self, api, account_id: str, *, throttle_limit: float = 90.0, max_jobs: int = 100):
        self._api = api
        self._account_id = account_id
        self.throttle_limit = throttle_limit
        self.max_jobs = max_jobs
        self._current_throttle: float = 0.0
        self._inflight: int = 0

    # --- Throttle ---
    def refresh_throttle(self) -> None:
        """Refresh and cache the throttle value via a cheap empty insights call.

        Pings the account so the `x-fb-ads-insights-throttle` header is
        updated, then stores the stricter of the two reported numbers.
        """
        self._api.get_account(account_id=self._account_id).get_insights()
        throttle = self._api.api.ads_insights_throttle
        per_account = getattr(throttle, "per_account", 0.0)
        per_application = getattr(throttle, "per_application", 0.0)
        self._current_throttle = max(per_account, per_application)

    @property
    def limit_reached(self) -> bool:
        """True when either the concurrency cap or the throttle blocks new jobs."""
        if self._inflight >= self.max_jobs:
            return True
        return self._current_throttle >= self.throttle_limit

    @property
    def capacity_reached(self) -> bool:
        """True when the concurrency cap alone is exhausted."""
        return self._inflight >= self.max_jobs

    # --- Capacity accounting ---
    def release(self) -> None:
        """Give back one slot; called once per job reaching a terminal state."""
        if self._inflight > 0:
            self._inflight -= 1

    def try_consume(self) -> bool:
        """Reserve capacity for one new job if throttle and concurrency allow.

        Jobs should call this right before actually starting the AdReportRun.
        """
        # Skip the remote throttle refresh when concurrency already blocks us.
        if self.capacity_reached:
            return False
        self.refresh_throttle()
        if self.limit_reached:
            return False
        self._inflight += 1
        return True

    # --- Introspection (optional logging) ---
    @property
    def inflight(self) -> int:
        return self._inflight

    @property
    def current_throttle(self) -> float:
        return self._current_throttle
|
APILimit
|
python
|
apache__airflow
|
providers/common/sql/tests/unit/common/sql/operators/test_sql.py
|
{
"start": 19437,
"end": 28277
}
|
class ____:
count_check = "COUNT(*) == 1000"
sum_check = "col_a + col_b < col_c"
checks = {
"row_count_check": {"check_statement": f"{count_check}"},
"column_sum_check": {"check_statement": f"{sum_check}"},
}
correct_generate_sql_query_no_partitions = f"""
SELECT 'row_count_check' AS check_name, MIN(row_count_check) AS check_result
FROM (SELECT CASE WHEN {count_check} THEN 1 ELSE 0 END AS row_count_check
FROM test_table ) AS sq
UNION ALL
SELECT 'column_sum_check' AS check_name, MIN(column_sum_check) AS check_result
FROM (SELECT CASE WHEN {sum_check} THEN 1 ELSE 0 END AS column_sum_check
FROM test_table ) AS sq
"""
correct_generate_sql_query_with_partition = f"""
SELECT 'row_count_check' AS check_name, MIN(row_count_check) AS check_result
FROM (SELECT CASE WHEN {count_check} THEN 1 ELSE 0 END AS row_count_check
FROM test_table WHERE col_a > 10) AS sq
UNION ALL
SELECT 'column_sum_check' AS check_name, MIN(column_sum_check) AS check_result
FROM (SELECT CASE WHEN {sum_check} THEN 1 ELSE 0 END AS column_sum_check
FROM test_table WHERE col_a > 10) AS sq
"""
correct_generate_sql_query_with_partition_and_where = f"""
SELECT 'row_count_check' AS check_name, MIN(row_count_check) AS check_result
FROM (SELECT CASE WHEN {count_check} THEN 1 ELSE 0 END AS row_count_check
FROM test_table WHERE col_a > 10 AND id = 100) AS sq
UNION ALL
SELECT 'column_sum_check' AS check_name, MIN(column_sum_check) AS check_result
FROM (SELECT CASE WHEN {sum_check} THEN 1 ELSE 0 END AS column_sum_check
FROM test_table WHERE col_a > 10) AS sq
"""
correct_generate_sql_query_with_where = f"""
SELECT 'row_count_check' AS check_name, MIN(row_count_check) AS check_result
FROM (SELECT CASE WHEN {count_check} THEN 1 ELSE 0 END AS row_count_check
FROM test_table ) AS sq
UNION ALL
SELECT 'column_sum_check' AS check_name, MIN(column_sum_check) AS check_result
FROM (SELECT CASE WHEN {sum_check} THEN 1 ELSE 0 END AS column_sum_check
FROM test_table WHERE id = 100) AS sq
"""
def _construct_operator(self, monkeypatch, checks, records):
def get_records(*arg):
return records
operator = SQLTableCheckOperator(task_id="test_task", table="test_table", checks=checks)
monkeypatch.setattr(operator, "get_db_hook", _get_mock_db_hook)
monkeypatch.setattr(MockHook, "get_records", get_records)
return operator
@pytest.mark.parametrize(
"conn_id",
[
pytest.param("postgres_default", marks=[pytest.mark.backend("postgres")]),
pytest.param("mysql_default", marks=[pytest.mark.backend("mysql")]),
],
)
def test_sql_check(self, conn_id):
operator = SQLTableCheckOperator(
task_id="test_task",
table="employees",
checks={"row_count_check": {"check_statement": "COUNT(*) >= 3"}},
conn_id=conn_id,
)
hook = operator.get_db_hook()
hook.run(
[
"""
CREATE TABLE IF NOT EXISTS employees (
employee_name VARCHAR(50) NOT NULL,
employment_year INT NOT NULL
);
""",
"INSERT INTO employees VALUES ('Adam', 2021)",
"INSERT INTO employees VALUES ('Chris', 2021)",
"INSERT INTO employees VALUES ('Frank', 2021)",
"INSERT INTO employees VALUES ('Fritz', 2021)",
"INSERT INTO employees VALUES ('Magda', 2022)",
"INSERT INTO employees VALUES ('Phil', 2021)",
]
)
try:
operator.execute({})
finally:
hook.run(["DROP TABLE employees"])
@pytest.mark.parametrize(
"conn_id",
[
pytest.param("postgres_default", marks=[pytest.mark.backend("postgres")]),
pytest.param("mysql_default", marks=[pytest.mark.backend("mysql")]),
],
)
def test_sql_check_partition_clause_templating(self, conn_id):
"""
Checks that the generated sql respects a templated partition clause
"""
operator = SQLTableCheckOperator(
task_id="test_task",
table="employees",
checks={"row_count_check": {"check_statement": "COUNT(*) = 5"}},
conn_id=conn_id,
partition_clause="employment_year = {{ params.year }}",
)
hook = operator.get_db_hook()
hook.run(
[
"""
CREATE TABLE IF NOT EXISTS employees (
employee_name VARCHAR(50) NOT NULL,
employment_year INT NOT NULL
);
""",
"INSERT INTO employees VALUES ('Adam', 2021)",
"INSERT INTO employees VALUES ('Chris', 2021)",
"INSERT INTO employees VALUES ('Frank', 2021)",
"INSERT INTO employees VALUES ('Fritz', 2021)",
"INSERT INTO employees VALUES ('Magda', 2022)",
"INSERT INTO employees VALUES ('Phil', 2021)",
]
)
try:
operator.render_template_fields({"params": {"year": 2021}})
operator.execute({})
finally:
hook.run(["DROP TABLE employees"])
def test_pass_all_checks_check(self, monkeypatch):
records = [("row_count_check", 1), ("column_sum_check", "y")]
operator = self._construct_operator(monkeypatch, self.checks, records)
operator.execute(context=MagicMock())
assert [operator.checks[check]["success"] is True for check in operator.checks.keys()]
def test_fail_all_checks_check(self, monkeypatch):
records = [("row_count_check", 0), ("column_sum_check", "n")]
operator = self._construct_operator(monkeypatch, self.checks, records)
with pytest.raises(AirflowException):
operator.execute(context=MagicMock())
def test_generate_sql_query_no_partitions(self, monkeypatch):
operator = self._construct_operator(monkeypatch, self.checks, ())
assert (
operator._generate_sql_query().lstrip() == self.correct_generate_sql_query_no_partitions.lstrip()
)
def test_generate_sql_query_with_partitions(self, monkeypatch):
operator = self._construct_operator(monkeypatch, self.checks, ())
operator.partition_clause = "col_a > 10"
assert (
operator._generate_sql_query().lstrip() == self.correct_generate_sql_query_with_partition.lstrip()
)
def test_generate_sql_query_with_templated_partitions(self, monkeypatch):
operator = self._construct_operator(monkeypatch, self.checks, ())
operator.partition_clause = "{{ params.col }} > 10"
operator.render_template_fields({"params": {"col": "col_a"}})
assert (
operator._generate_sql_query().lstrip() == self.correct_generate_sql_query_with_partition.lstrip()
)
def test_generate_sql_query_with_templated_table(self, monkeypatch):
operator = self._construct_operator(monkeypatch, self.checks, ())
operator.table = "{{ params.table }}"
operator.render_template_fields({"params": {"table": "test_table"}})
assert (
operator._generate_sql_query().lstrip() == self.correct_generate_sql_query_no_partitions.lstrip()
)
def test_generate_sql_query_with_partitions_and_check_partition(self, monkeypatch):
self.checks["row_count_check"]["partition_clause"] = "id = 100"
operator = self._construct_operator(monkeypatch, self.checks, ())
operator.partition_clause = "col_a > 10"
assert (
operator._generate_sql_query().lstrip()
== self.correct_generate_sql_query_with_partition_and_where.lstrip()
)
del self.checks["row_count_check"]["partition_clause"]
def test_generate_sql_query_with_check_partition(self, monkeypatch):
self.checks["column_sum_check"]["partition_clause"] = "id = 100"
operator = self._construct_operator(monkeypatch, self.checks, ())
assert operator._generate_sql_query().lstrip() == self.correct_generate_sql_query_with_where.lstrip()
del self.checks["column_sum_check"]["partition_clause"]
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
INTERVAL = datetime.timedelta(hours=12)
SUPPORTED_TRUE_VALUES = [
["True"],
["true"],
["1"],
["on"],
[1],
True,
"true",
"1",
"on",
1,
]
SUPPORTED_FALSE_VALUES = [
["False"],
["false"],
["0"],
["off"],
[0],
False,
"false",
"0",
"off",
0,
]
|
TestTableCheckOperator
|
python
|
pytorch__pytorch
|
torch/ao/nn/intrinsic/modules/fused.py
|
{
"start": 1817,
"end": 2384
}
|
class ____(_FusedModule):
r"""This is a sequential container which calls the Conv3d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert (
type_before_parametrizations(conv) == Conv3d
and type_before_parametrizations(relu) == ReLU
), (
f"Incorrect types for input modules{type_before_parametrizations(conv)}"
f"{type_before_parametrizations(relu)}"
)
super().__init__(conv, relu)
|
ConvReLU3d
|
python
|
kamyu104__LeetCode-Solutions
|
Python/count-primes.py
|
{
"start": 145,
"end": 717
}
|
class ____(object):
# @param {integer} n
# @return {integer}
def countPrimes(self, n):
if n <= 2:
return 0
is_prime = [True]*(n//2)
cnt = len(is_prime)
for i in xrange(3, n, 2):
if i * i >= n:
break
if not is_prime[i//2]:
continue
for j in xrange(i*i, n, 2*i):
if not is_prime[j//2]:
continue
cnt -= 1
is_prime[j//2] = False
return cnt
# Time: O(n)
# Space: O(n)
|
Solution
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 490191,
"end": 490571
}
|
class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("CheckRun", graphql_name="node")
"""The item at the end of the edge."""
|
CheckRunEdge
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/collective_ops_gpu_test.py
|
{
"start": 1260,
"end": 12693
}
|
class ____(test.TestCase):
@classmethod
def setUpClass(cls):
"""Set group_size = num_gpus = 2 for all tests in this class."""
super(CollectiveOpGPUTest, cls).setUpClass()
# Group size is the number of devices in a group communicating collectively.
# This will be passed into the collective ops in the tests below.
cls._group_size = 2
cls._devices = ['/device:GPU:{}'.format(i) for i in range(2)]
os.environ['NCCL_DEBUG'] = 'INFO'
os.environ['NCCL_LAUNCH_MODE'] = 'PARALLEL'
def _setup_context(self, num_gpus=2):
context._reset_context()
gpus = config.list_physical_devices('GPU')
if len(gpus) < num_gpus:
self.skipTest(
'Expected at least {} GPUs but found {} GPUs'.format(
num_gpus, len(gpus)
)
)
context.ensure_initialized()
def testBasicNcclAllReduce(self):
self._setup_context()
inputs = [
[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3],
]
expected = [0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2]
group_key = 1
instance_key = 1
@def_function.function
def run_basic_all_reduce():
collectives = []
for i in range(self._group_size):
with ops.device(self._devices[i]):
t = constant_op.constant(inputs[i])
collectives.append(
collective_ops.all_reduce(
t, self._group_size, group_key, instance_key, 'Add', 'Div'
)
)
return collectives
for result in run_basic_all_reduce():
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testInt32Error(self):
self._setup_context()
inputs = [[0, 1], [2, 3]]
group_key = 1
instance_key = 50
@def_function.function
def run_int32_error():
for i in range(self._group_size):
with ops.device(self._devices[i]):
t = constant_op.constant(inputs[i], dtype=dtypes.int32)
collective_ops.all_reduce(
t, self._group_size, group_key, instance_key, 'Add', 'Div'
)
with self.assertRaisesRegex(
errors.InternalError, 'does not support datatype DT_INT32 on DEVICE_GPU'
):
run_int32_error()
def testFp16Reduce(self):
self._setup_context()
inputs = [
[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3],
]
expected = [0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2]
group_key = 1
instance_key = 100
@def_function.function
def run_fp16_reduce():
collectives = []
for i in range(self._group_size):
with ops.device(self._devices[i]):
t = constant_op.constant(inputs[i], dtype=dtypes.float16)
collectives.append(
collective_ops.all_reduce(
t, self._group_size, group_key, instance_key, 'Add', 'Div'
)
)
return collectives
for result in run_fp16_reduce():
self.assertAllClose(result, expected, rtol=1e-3, atol=1e-3)
def testNcclHintAllReduce(self):
self._setup_context()
inputs = [
[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3],
]
expected = [0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2]
group_key = 1
instance_key = 1
@def_function.function
def run_nccl_hint_all_reduce():
collectives = []
for i in range(self._group_size):
with ops.device(self._devices[i]):
t = constant_op.constant(inputs[i])
collectives.append(
collective_ops.all_reduce(
t,
self._group_size,
group_key,
instance_key,
'Add',
'Div',
communication_hint='nccl',
)
)
return collectives
for result in run_nccl_hint_all_reduce():
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testBasicNcclBroadcast(self):
self._setup_context()
tensor_value = [0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]
group_key = 1
instance_key = 1
@def_function.function
def run_basic_nccl_broadcast():
collectives = []
with ops.device(self._devices[0]):
t = constant_op.constant(tensor_value)
collectives.append(
collective_ops.broadcast_send(
t, t.shape, t.dtype, self._group_size, group_key, instance_key
)
)
with ops.device(self._devices[1]):
t = constant_op.constant(tensor_value)
collectives.append(
collective_ops.broadcast_recv(
t.shape, t.dtype, self._group_size, group_key, instance_key
)
)
return collectives
for result in run_basic_nccl_broadcast():
self.assertAllClose(result, tensor_value, rtol=1e-5, atol=1e-5)
def testNcclBroadcastDoubleRecv(self):
self._setup_context()
tensor_value = [0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]
group_key = 1
instance_key = 1
@def_function.function
def run_nccl_broadcast_double_recv():
for device in self._devices:
with ops.device(device):
t = constant_op.constant(tensor_value)
collective_ops.broadcast_recv(
t.shape, t.dtype, self._group_size, group_key, instance_key
)
with self.assertRaisesRegex(errors.InternalError, 'found no source'):
run_nccl_broadcast_double_recv()
def testNcclBroadcastDoubleSend(self):
self._setup_context()
tensor_value = [0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]
group_key = 1
instance_key = 1
@def_function.function
def run_nccl_broadcast_double_send():
for device in self._devices:
with ops.device(device):
t = constant_op.constant(tensor_value)
collective_ops.broadcast_send(
t, t.shape, t.dtype, self._group_size, group_key, instance_key
)
with self.assertRaisesRegex(errors.InternalError, 'already has source'):
run_nccl_broadcast_double_send()
def testBasicNcclAllGather(self):
self._setup_context()
inputs = [
[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
[0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3],
]
expected = [
0.1,
1.1,
2.1,
3.1,
4.1,
5.1,
6.1,
7.1,
0.3,
1.3,
2.3,
3.3,
4.3,
5.3,
6.3,
7.3,
]
group_key = 1
instance_key = 1
@def_function.function
def run_basic_nccl_all_gather():
collectives = []
for i in range(self._group_size):
with ops.device(self._devices[i]):
t = constant_op.constant(inputs[i])
collectives.append(
collective_ops.all_gather(
t, self._group_size, group_key, instance_key
)
)
return collectives
for result in run_basic_nccl_all_gather():
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testCollectiveDeviceMismatch(self):
self._setup_context()
group_key = 10
instance_key = 20
t0 = [1, 2, 3, 4]
t1 = [5, 6, 7, 8]
@def_function.function
def run_collective_device_mismatch():
with ops.device('/CPU:0'):
in0 = constant_op.constant(t0)
collective_ops.all_reduce(
in0, self._group_size, group_key, instance_key, 'Add', 'Id'
)
with ops.device('/GPU:0'):
in1 = constant_op.constant(t1)
collective_ops.all_reduce(
in1, self._group_size, group_key, instance_key, 'Add', 'Id'
)
with self.assertRaisesRegex(
errors.InternalError, 'but that group has type'
):
run_collective_device_mismatch()
def testCollectiveReduceMinMax(self):
self._setup_context()
@def_function.function
def run_all_reduce(group_key, instance_key, merge_op):
t0 = [1.0, 20.0, 3.0, 40.0, 5.0]
t1 = [10.0, 2.0, 30.0, 4.0, 50.0]
with ops.device('/GPU:0'):
in0 = constant_op.constant(t0)
c0 = collective_ops.all_reduce(
in0,
self._group_size,
group_key,
instance_key,
merge_op,
final_op='Id',
communication_hint='nccl',
)
with ops.device('/GPU:1'):
in1 = constant_op.constant(t1)
c1 = collective_ops.all_reduce(
in1,
self._group_size,
group_key,
instance_key,
merge_op,
final_op='Id',
communication_hint='nccl',
)
return c0, c1
for combination in [
('Max', [10.0, 20.0, 30.0, 40.0, 50.0]),
('Min', [1.0, 2.0, 3.0, 4.0, 5.0]),
]:
merge_op = combination[0]
results = run_all_reduce(group_key=10, instance_key=20, merge_op=merge_op)
expected = combination[1]
for result in results:
self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5)
def testNcclStress(self):
self.skipTest(
'b/435404154: As we moved from NVIDIA CUDA base image to Ubuntu 22.04'
' with NVIDIA Driver 580 installed for RBE, this test is failing and'
' needs to be addressed as part of the bug.'
)
self._setup_context(num_gpus=1)
num_iters = 1000
for _ in range(num_iters):
with ops.device('/device:GPU:0'):
collective_ops.all_reduce(
[1.0],
group_size=1,
group_key=0,
instance_key=0,
merge_op='Add',
final_op='Id',
communication_hint='NCCL',
)
@test_util.run_v2_only
def testAbortNccl(self):
self._setup_context(num_gpus=2)
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant(1.0)
# First perform a normal collective to finish resolution.
def collective_fn():
for device in ['GPU:0', 'GPU:1']:
with ops.device(device):
collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
'Add',
'Id',
communication_hint='nccl',
)
def_function.function(collective_fn)()
# Launch a collective that hangs, and abort the collective executor after
# the launch.
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
'Add',
'Id',
communication_hint='nccl',
)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
'Add',
'Id',
communication_hint='nccl',
)
t.join()
# Reset the context in order to reset the collective executor.
context._reset_context() # pylint: disable=protected-access
def_function.function(collective_fn)()
if __name__ == '__main__':
test.main()
|
CollectiveOpGPUTest
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pycodestyle/E30.py
|
{
"start": 10082,
"end": 10218
}
|
class ____:
if True:
print("conditional")
def test():
pass
# end
# Test case for nested class scenario
|
Foo
|
python
|
great-expectations__great_expectations
|
great_expectations/datasource/fluent/config_str.py
|
{
"start": 3938,
"end": 9524
}
|
class ____(AnyUrl, ConfigStr): # type: ignore[misc] # Mixin "validate" signature mismatch
"""
Special type that enables great_expectation config variable substitution for the
`user` and `password` section of a URI.
Example:
```
"snowflake://${MY_USER}:${MY_PASSWORD}@account/database/schema/table"
```
Note: this type is meant to used as part of pydantic model.
To use this outside of a model see the pydantic docs below.
https://docs.pydantic.dev/usage/models/#parsing-data-into-a-specified-type
"""
ALLOWED_SUBSTITUTIONS: ClassVar[set[UriParts]] = {"user", "password"}
min_length: int = 1
max_length: int = 2**16
def __init__( # noqa: PLR0913 # for compatibility with AnyUrl
self,
template_str: str,
*,
scheme: str,
user: Optional[str] = None,
password: Optional[str] = None,
host: Optional[str] = None,
tld: Optional[str] = None,
host_type: str = "domain",
port: Optional[str] = None,
path: Optional[str] = None,
query: Optional[str] = None,
fragment: Optional[str] = None,
) -> None:
if template_str: # may have already been set in __new__
self.template_str: str = template_str
self._secret_value = template_str # for compatibility with SecretStr
super().__init__(
template_str,
scheme=scheme,
user=user,
password=password,
host=host,
tld=tld,
host_type=host_type,
port=port,
path=path,
query=query,
fragment=fragment,
)
def __new__(cls: type[Self], template_str: Optional[str], **kwargs) -> Self:
"""custom __new__ for compatibility with pydantic.parse_obj_as()"""
built_url = cls.build(**kwargs) if template_str is None else template_str
instance = str.__new__(cls, built_url)
instance.template_str = str(instance)
return instance
@property
def params(self) -> dict[str, list[str]]:
"""The query parameters as a dictionary."""
if not self.query:
return {}
return urllib.parse.parse_qs(self.query)
@classmethod
@override
def validate_parts(cls, parts: UriPartsDict, validate_port: bool = True) -> UriPartsDict:
"""
Ensure that only the `user` and `password` parts have config template strings.
Also validate that all parts of the URI are valid.
"""
allowed_substitutions = sorted(cls.ALLOWED_SUBSTITUTIONS)
for name, part in parts.items():
if not part:
continue
if (
cls.str_contains_config_template(part) # type: ignore[arg-type] # is str
and name not in cls.ALLOWED_SUBSTITUTIONS
):
raise ValueError( # noqa: TRY003 # FIXME CoP
f"Only {', '.join(allowed_substitutions)} may use config substitution; '{name}'"
" substitution not allowed"
)
return AnyUrl.validate_parts(parts, validate_port)
@override
def get_config_value(self, config_provider: _ConfigurationProvider) -> AnyUrl:
"""
Resolve the config template string to its string value according to the passed
_ConfigurationProvider.
Parse the resolved URI string into an `AnyUrl` object.
"""
LOGGER.info(f"Substituting '{self}'")
raw_value = config_provider.substitute_config(self.template_str)
return parse_obj_as(AnyUrl, raw_value)
@classmethod
@override
def __get_validators__(cls):
# one or more validators may be yielded which will be called in the
# order to validate the input, each validator will receive as an input
# the value returned from the previous validator
yield ConfigStr.validate_template_str_format
yield cls.validate # equivalent to AnyUrl.validate
@classmethod
@override
def __modify_schema__(cls, field_schema: dict) -> None:
"""Update the generated schema when used in a pydantic model."""
ConfigStr.__modify_schema__(field_schema)
AnyUrl.__modify_schema__(field_schema)
field_schema.update(
{
"description": "Contains config templates for user:password in a URI"
" to be substituted at runtime. Runtime values will never be serialized.",
"examples": [
"snowflake://dickens:${PASSWORD}@host/db/schema",
"snowflake://${USER}:${PASSWORD}@host/db/schema",
],
}
)
def _check_config_substitutions_needed(
datasource: Datasource,
options: Mapping,
raise_warning_if_provider_not_present: bool,
) -> set[str]:
"""
Given a Datasource and a dict-like mapping type return the keys whose value is a `ConfigStr` type.
Optionally raise a warning if config substitution is needed but impossible due to a missing `_config_provider`.
""" # noqa: E501 # FIXME CoP
need_config_subs: set[str] = {k for (k, v) in options.items() if isinstance(v, ConfigStr)}
if (
need_config_subs
and raise_warning_if_provider_not_present
and not datasource._config_provider
):
warnings.warn(
f"config variables '{','.join(need_config_subs)}' need substitution but no `_ConfigurationProvider` is present" # noqa: E501 # FIXME CoP
)
return need_config_subs
|
ConfigUri
|
python
|
walkccc__LeetCode
|
solutions/1296. Divide Array in Sets of K Consecutive Numbers/1296.py
|
{
"start": 0,
"end": 336
}
|
class ____:
def isPossibleDivide(self, nums: list[int], k: int) -> bool:
count = collections.Counter(nums)
for start in sorted(count):
value = count[start]
if value > 0:
for i in range(start, start + k):
count[i] -= value
if count[i] < 0:
return False
return True
|
Solution
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/log_manager.py
|
{
"start": 11471,
"end": 18051
}
|
class ____(logging.Logger):
"""Centralized dispatch for logging from user code.
Handles the construction of uniform structured log messages and passes them through to the
underlying loggers/handlers.
An instance of the log manager is made available to ops as ``context.log``. Users should not
initialize instances of the log manager directly. To configure custom loggers, set the
``logger_defs`` argument in an `@job` decorator or when calling the `to_job()` method on a
:py:class:`GraphDefinition`.
The log manager inherits standard convenience methods like those exposed by the Python standard
library :py:mod:`python:logging` module (i.e., within the body of an op,
``context.log.{debug, info, warning, warn, error, critical, fatal}``).
The underlying integer API can also be called directly using, e.g.
``context.log.log(5, msg)``, and the log manager will delegate to the ``log`` method
defined on each of the loggers it manages.
User-defined custom log levels are not supported, and calls to, e.g.,
``context.log.trace`` or ``context.log.notice`` will result in hard exceptions **at runtime**.
"""
def __init__(
self,
dagster_handler: DagsterLogHandler,
level: int = logging.NOTSET,
managed_loggers: Optional[Sequence[logging.Logger]] = None,
):
super().__init__(name="dagster", level=coerce_valid_log_level(level))
self._managed_loggers = check.opt_sequence_param(
managed_loggers, "managed_loggers", of_type=logging.Logger
)
self._dagster_handler = dagster_handler
self.addHandler(dagster_handler)
@classmethod
def create(
cls,
loggers: Sequence[logging.Logger],
handlers: Optional[Sequence[logging.Handler]] = None,
instance: Optional["DagsterInstance"] = None,
dagster_run: Optional["DagsterRun"] = None,
) -> "DagsterLogManager":
"""Create a DagsterLogManager with a set of subservient loggers."""
handlers = check.opt_sequence_param(handlers, "handlers", of_type=logging.Handler)
managed_loggers = [get_dagster_logger()]
python_log_level = logging.NOTSET
if instance:
handlers = [*handlers, *instance.get_handlers()]
managed_loggers += [
logging.getLogger(lname) if lname != "root" else logging.getLogger()
for lname in instance.managed_python_loggers
]
if instance.python_log_level is not None:
python_log_level = coerce_valid_log_level(instance.python_log_level)
# set all loggers to the declared logging level
for logger in managed_loggers:
logger.setLevel(python_log_level)
handler_metadata = DagsterLogHandlerMetadata(
run_id=dagster_run.run_id if dagster_run else None,
job_name=dagster_run.job_name if dagster_run else None,
job_tags=dagster_run.tags if dagster_run else {},
# These will be set on handlers for individual steps
step_key=None,
op_name=None,
resource_name=None,
resource_fn_name=None,
)
return cls(
dagster_handler=DagsterLogHandler(
metadata=handler_metadata,
loggers=loggers,
handlers=handlers,
),
level=python_log_level,
managed_loggers=managed_loggers,
)
@property
def metadata(self) -> DagsterLogHandlerMetadata:
return self._dagster_handler.metadata
def begin_python_log_capture(self) -> None:
for logger in self._managed_loggers:
logger.addHandler(self._dagster_handler)
def end_python_log_capture(self) -> None:
for logger in self._managed_loggers:
logger.removeHandler(self._dagster_handler)
def log_dagster_event(
self,
level: Union[str, int],
msg: str,
dagster_event: "DagsterEvent",
batch_metadata: Optional["DagsterEventBatchMetadata"] = None,
) -> None:
"""Log a DagsterEvent at the given level. Attributes about the context it was logged in
(such as the asset or job name) will be automatically attached to the created record.
Args:
level (str, int): either a string representing the desired log level ("INFO", "WARN"),
or an integer level such as logging.INFO or logging.DEBUG.
msg (str): message describing the event
dagster_event (DagsterEvent): DagsterEvent that will be logged
batch_metadata (BatchMetadata): Metadata about the batch that the event is a part of.
"""
self.log(
level=level,
msg=msg,
extra={
LOG_RECORD_EVENT_ATTR: dagster_event,
LOG_RECORD_EVENT_BATCH_METADATA_ATTR: batch_metadata,
},
)
def log(
self,
level: Union[str, int],
msg: object,
*args: Any,
**kwargs: Any,
) -> None:
"""Log a message at the given level. Attributes about the context it was logged in (such as
the asset or job name) will be automatically attached to the created record.
Args:
level (str, int): either a string representing the desired log level ("INFO", "WARN"),
or an integer level such as logging.INFO or logging.DEBUG.
msg (str): the message to be logged
*args: the logged message will be msg % args
"""
level = coerce_valid_log_level(level)
# log DagsterEvents regardless of level
if self.isEnabledFor(level) or (
"extra" in kwargs and LOG_RECORD_EVENT_ATTR in kwargs["extra"]
):
self._log(level, msg, args, **kwargs)
def with_tags(self, **new_tags: str) -> "DagsterLogManager":
"""Add new tags in "new_tags" to the set of tags attached to this log manager instance, and
return a new DagsterLogManager with the merged set of tags.
Args:
new_tags (Dict[str,str]): Dictionary of tags
Returns:
DagsterLogManager: a new DagsterLogManager namedtuple with updated tags for the same
run ID and loggers.
"""
return DagsterLogManager(
dagster_handler=self._dagster_handler.with_tags(**new_tags),
managed_loggers=self._managed_loggers,
level=self.level,
)
|
DagsterLogManager
|
python
|
pytorch__pytorch
|
torch/package/file_structure_representation.py
|
{
"start": 103,
"end": 4746
}
|
class ____:
"""A file structure representation. Organized as Directory nodes that have lists of
their Directory children. Directories for a package are created by calling
:meth:`PackageImporter.file_structure`."""
def __init__(self, name: str, is_dir: bool):
self.name = name
self.is_dir = is_dir
self.children: dict[str, Directory] = {}
def _get_dir(self, dirs: list[str]) -> "Directory":
"""Builds path of Directories if not yet built and returns last directory
in list.
Args:
dirs (List[str]): List of directory names that are treated like a path.
Returns:
:class:`Directory`: The last Directory specified in the dirs list.
"""
if len(dirs) == 0:
return self
dir_name = dirs[0]
if dir_name not in self.children:
self.children[dir_name] = Directory(dir_name, True)
return self.children[dir_name]._get_dir(dirs[1:])
def _add_file(self, file_path: str):
"""Adds a file to a Directory.
Args:
file_path (str): Path of file to add. Last element is added as a file while
other paths items are added as directories.
"""
*dirs, file = file_path.split("/")
dir = self._get_dir(dirs)
dir.children[file] = Directory(file, False)
def has_file(self, filename: str) -> bool:
"""Checks if a file is present in a :class:`Directory`.
Args:
filename (str): Path of file to search for.
Returns:
bool: If a :class:`Directory` contains the specified file.
"""
lineage = filename.split("/", maxsplit=1)
child = lineage[0]
grandchildren = lineage[1] if len(lineage) > 1 else None
if child in self.children:
if grandchildren is None:
return True
else:
return self.children[child].has_file(grandchildren)
return False
def __str__(self):
str_list: list[str] = []
self._stringify_tree(str_list)
return "".join(str_list)
def _stringify_tree(
self,
str_list: list[str],
preamble: str = "",
dir_ptr: str = "\u2500\u2500\u2500 ",
):
"""Recursive method to generate print-friendly version of a Directory."""
space = " "
branch = "\u2502 "
tee = "\u251c\u2500\u2500 "
last = "\u2514\u2500\u2500 "
# add this directory's representation
str_list.append(f"{preamble}{dir_ptr}{self.name}\n")
# add directory's children representations
if dir_ptr == tee:
preamble = preamble + branch
else:
preamble = preamble + space
file_keys: list[str] = []
dir_keys: list[str] = []
for key, val in self.children.items():
if val.is_dir:
dir_keys.append(key)
else:
file_keys.append(key)
for index, key in enumerate(sorted(dir_keys)):
if (index == len(dir_keys) - 1) and len(file_keys) == 0:
self.children[key]._stringify_tree(str_list, preamble, last)
else:
self.children[key]._stringify_tree(str_list, preamble, tee)
for index, file in enumerate(sorted(file_keys)):
pointer = last if (index == len(file_keys) - 1) else tee
str_list.append(f"{preamble}{pointer}{file}\n")
def _create_directory_from_file_list(
    filename: str,
    file_list: list[str],
    include: "GlobPattern" = "**",
    exclude: "GlobPattern" = (),
) -> Directory:
    """Build a :class:`Directory` tree from a flat list of file paths.

    Args:
        filename (str): Name of the synthetic top-level directory that all
            paths in ``file_list`` are rooted under.
        file_list (List[str]): File paths to insert into the tree.
        include (Union[List[str], str]): Glob pattern(s); only files whose
            names match are inserted.
        exclude (Union[List[str], str]): Glob pattern(s); files whose names
            match are skipped.

    Returns:
        :class:`Directory`: The populated top-level directory.
    """
    matcher = GlobGroup(include, exclude=exclude, separator="/")
    root = Directory(filename, True)
    for path in file_list:
        if matcher.matches(path):
            root._add_file(path)
    return root
|
Directory
|
python
|
astropy__astropy
|
astropy/io/votable/tree.py
|
{
"start": 64610,
"end": 66592
}
|
class ____(SimpleElement, _UtypeProperty, _UcdProperty):
    """
    FIELDref_ element: used inside of GROUP_ elements to refer to remote FIELD_ elements.
    """

    # Attributes serialized for VOTable 1.1; version 1.2 additionally
    # allows ucd/utype on FIELDref.
    _attr_list_11 = ["ref"]
    _attr_list_12 = _attr_list_11 + ["ucd", "utype"]
    _element_name = "FIELDref"
    _utype_in_v1_2 = True
    _ucd_in_v1_2 = True

    def __init__(
        self, table, ref, ucd=None, utype=None, config=None, pos=None, **extra
    ):
        """
        *table* is the :class:`TableElement` object that this :class:`FieldRef`
        is a member of.
        *ref* is the ID to reference a :class:`Field` object defined
        elsewhere.
        """
        if config is None:
            config = {}
        self._config = config
        self._pos = pos
        SimpleElement.__init__(self)
        self._table = table
        self.ref = ref
        self.ucd = ucd
        self.utype = utype
        # Select the attribute list matching the document version; under
        # pre-1.2 documents, ucd/utype are not legal on FIELDref, so warn
        # if they were supplied.
        if config.get("version_1_2_or_later"):
            self._attr_list = self._attr_list_12
        else:
            self._attr_list = self._attr_list_11
            if ucd is not None:
                warn_unknown_attrs(self._element_name, ["ucd"], config, pos)
            if utype is not None:
                warn_unknown_attrs(self._element_name, ["utype"], config, pos)

    @property
    def ref(self):
        """The ID_ of the FIELD_ that this FIELDref_ references."""
        return self._ref

    @ref.setter
    def ref(self, ref):
        # Validate that the reference is a legal XML ID before storing it.
        xmlutil.check_id(ref, "ref", self._config, self._pos)
        self._ref = ref

    @ref.deleter
    def ref(self):
        self._ref = None

    def get_ref(self):
        """
        Lookup the :class:`Field` instance that this :class:`FieldRef`
        references.
        """
        # Linear scan over all FIELD/PARAM elements of the parent VOTable;
        # raises (via vo_raise) when the ID does not resolve to a Field.
        for field in self._table._votable.iter_fields_and_params():
            if isinstance(field, Field) and field.ID == self.ref:
                return field
        vo_raise(KeyError, f"No field named '{self.ref}'", self._config, self._pos)
|
FieldRef
|
python
|
automl__auto-sklearn
|
autosklearn/pipeline/components/feature_preprocessing/select_rates_regression.py
|
{
"start": 489,
"end": 4039
}
|
class ____(AutoSklearnPreprocessingAlgorithm):
    """Univariate feature selection for regression based on statistical rates.

    Wraps :class:`sklearn.feature_selection.GenericUnivariateSelect` with a
    regression scoring function (``f_regression`` or
    ``mutual_info_regression``).
    """

    def __init__(
        self, alpha, mode="percentile", score_func="f_regression", random_state=None
    ):
        import sklearn.feature_selection

        self.random_state = random_state  # We don't use this
        self.alpha = alpha
        self.mode = mode
        if score_func == "f_regression":
            self.score_func = sklearn.feature_selection.f_regression
        elif score_func == "mutual_info_regression":
            self.score_func = partial(
                sklearn.feature_selection.mutual_info_regression,
                random_state=self.random_state,
            )
            # Mutual info consistently crashes if percentile is not the mode
            self.mode = "percentile"
        else:
            raise ValueError(
                "score_func must be in ('f_regression, 'mutual_info_regression') "
                "for task=regression "
                "but is: %s " % (score_func)
            )

    def fit(self, X, y):
        """Fit the univariate selector on training data ``X`` / targets ``y``."""
        import sklearn.feature_selection

        self.alpha = float(self.alpha)
        self.preprocessor = sklearn.feature_selection.GenericUnivariateSelect(
            score_func=self.score_func, param=self.alpha, mode=self.mode
        )
        self.preprocessor.fit(X, y)
        return self

    def transform(self, X):
        """Apply the fitted selector.

        Raises:
            NotImplementedError: If called before :meth:`fit`.
            ValueError: If the selector removed every feature.
        """
        if self.preprocessor is None:
            raise NotImplementedError()
        try:
            Xt = self.preprocessor.transform(X)
        except ValueError as e:
            # BUG FIX: Python 3 exceptions have no `.message` attribute, so
            # the old `e.message` check raised AttributeError instead of the
            # intended ValueError. Inspect str(e) instead.
            if (
                "zero-size array to reduction operation maximum which has no "
                "identity" in str(e)
            ):
                raise ValueError(
                    "%s removed all features." % self.__class__.__name__
                ) from e
            else:
                raise
        if Xt.shape[1] == 0:
            raise ValueError("%s removed all features." % self.__class__.__name__)
        return Xt

    @staticmethod
    def get_properties(dataset_properties=None):
        """Describe the component's capabilities for the auto-sklearn framework."""
        return {
            "shortname": "SR",
            "name": "Univariate Feature Selection based on rates",
            "handles_regression": True,
            "handles_classification": False,
            "handles_multiclass": True,
            "handles_multilabel": False,
            "handles_multioutput": False,
            "is_deterministic": True,
            "input": (SPARSE, DENSE, UNSIGNED_DATA),
            "output": (INPUT,),
        }

    @staticmethod
    def get_hyperparameter_search_space(
        feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
    ):
        """Build the ConfigSpace describing this component's hyperparameters."""
        alpha = UniformFloatHyperparameter(
            name="alpha", lower=0.01, upper=0.5, default_value=0.1
        )
        # mutual_info_regression is only offered for sparse inputs.
        if dataset_properties is not None and dataset_properties.get("sparse"):
            choices = ["mutual_info_regression", "f_regression"]
        else:
            choices = ["f_regression"]
        score_func = CategoricalHyperparameter(
            name="score_func", choices=choices, default_value="f_regression"
        )
        mode = CategoricalHyperparameter("mode", ["fpr", "fdr", "fwe"], "fpr")
        cs = ConfigurationSpace()
        cs.add_hyperparameter(alpha)
        cs.add_hyperparameter(score_func)
        cs.add_hyperparameter(mode)
        # Mutual info consistently crashes if percentile is not the mode
        if "mutual_info_regression" in choices:
            cond = NotEqualsCondition(mode, score_func, "mutual_info_regression")
            cs.add_condition(cond)
        return cs
|
SelectRegressionRates
|
python
|
pytest-dev__pytest
|
testing/_py/test_local.py
|
{
"start": 50628,
"end": 51576
}
|
class ____:
    """Unicode handling tests for path objects with non-ASCII components."""

    def test_join_ensure(self, tmpdir, monkeypatch):
        if "LANG" not in os.environ:
            pytest.skip("cannot run test without locale")
        base = local(tmpdir.strpath)
        name = "hällo"
        created = base.ensure(name)
        assert base.join(name) == created

    def test_listdir(self, tmpdir):
        if "LANG" not in os.environ:
            pytest.skip("cannot run test without locale")
        base = local(tmpdir.strpath)
        name = "hällo"
        created = base.ensure(name)
        assert base.listdir(name)[0] == created

    @pytest.mark.xfail(reason="changing read/write might break existing usages")
    def test_read_write(self, tmpdir):
        target = tmpdir.join("hello")
        payload = "hällo"
        with ignore_encoding_warning():
            target.write(payload)
            assert target.read() == payload
            target.write(payload.encode(sys.getdefaultencoding()))
            assert target.read() == payload.encode(sys.getdefaultencoding())
|
TestUnicode
|
python
|
google__flatbuffers
|
python/flatbuffers/reflection/AdvancedFeatures.py
|
{
"start": 173,
"end": 322
}
|
class ____(object):
    """Bit-flag constants describing advanced FlatBuffers schema features.

    Values are distinct powers of two so they can be OR-ed together into a
    single bitmask.
    """

    AdvancedArrayFeatures = 1
    AdvancedUnionFeatures = 2
    OptionalScalars = 4
    DefaultVectorsAndStrings = 8
|
AdvancedFeatures
|
python
|
numpy__numpy
|
benchmarks/benchmarks/bench_array_coercion.py
|
{
"start": 52,
"end": 1665
}
|
class ____(Benchmark):
    # More detailed benchmarks for array coercion,
    # some basic benchmarks are in `bench_core.py`.
    # Each benchmark is run once per array-like flavour: a range, a list,
    # a scalar int, a small ndarray, and a NumPy scalar.
    params = [[range(3), [1], 1, np.array([5], dtype=np.int64), np.int64(5)]]
    param_names = ['array_like']
    int64 = np.dtype(np.int64)

    def time_array_invalid_kwarg(self, array_like):
        # Measures the error path: an invalid `ndmin` must raise TypeError.
        try:
            np.array(array_like, ndmin="not-integer")
        except TypeError:
            pass

    def time_array(self, array_like):
        np.array(array_like)

    def time_array_dtype_not_kwargs(self, array_like):
        # dtype passed positionally rather than as a keyword.
        np.array(array_like, self.int64)

    def time_array_no_copy(self, array_like):
        np.array(array_like, copy=None)

    def time_array_subok(self, array_like):
        np.array(array_like, subok=True)

    def time_array_all_kwargs(self, array_like):
        # Worst case for argument parsing: every keyword supplied at once.
        np.array(array_like, dtype=self.int64, copy=None, order="F",
                 subok=False, ndmin=2)

    def time_asarray(self, array_like):
        np.asarray(array_like)

    def time_asarray_dtype(self, array_like):
        np.asarray(array_like, dtype=self.int64)

    def time_asarray_dtype_order(self, array_like):
        np.asarray(array_like, dtype=self.int64, order="F")

    def time_asanyarray(self, array_like):
        np.asanyarray(array_like)

    def time_asanyarray_dtype(self, array_like):
        np.asanyarray(array_like, dtype=self.int64)

    def time_asanyarray_dtype_order(self, array_like):
        np.asanyarray(array_like, dtype=self.int64, order="F")

    def time_ascontiguousarray(self, array_like):
        np.ascontiguousarray(array_like)
|
ArrayCoercionSmall
|
python
|
django__django
|
tests/cache/tests.py
|
{
"start": 111483,
"end": 112855
}
|
class ____(SimpleTestCase):
    """Tests for make_template_fragment_key()'s stable key scheme."""

    def _assert_key(self, expected, fragment, vary_on=None):
        # Helper: build the cache key and compare against the known digest.
        if vary_on is None:
            key = make_template_fragment_key(fragment)
        else:
            key = make_template_fragment_key(fragment, vary_on)
        self.assertEqual(key, expected)

    def test_without_vary_on(self):
        self._assert_key(
            "template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e",
            "a.fragment",
        )

    def test_with_one_vary_on(self):
        self._assert_key(
            "template.cache.foo.493e283d571a73056196f1a68efd0f66", "foo", ["abc"]
        )

    def test_with_many_vary_on(self):
        self._assert_key(
            "template.cache.bar.17c1a507a0cb58384f4c639067a93520",
            "bar",
            ["abc", "def"],
        )

    def test_proper_escaping(self):
        self._assert_key(
            "template.cache.spam.06c8ae8e8c430b69fb0a6443504153dc",
            "spam",
            ["abc:def%"],
        )

    def test_with_ints_vary_on(self):
        self._assert_key(
            "template.cache.foo.7ae8fd2e0d25d651c683bdeebdb29461",
            "foo",
            [1, 2, 3, 4, 5],
        )

    def test_with_unicode_vary_on(self):
        self._assert_key(
            "template.cache.foo.7ced1c94e543668590ba39b3c08b0237",
            "foo",
            ["42º", "😀"],
        )

    def test_long_vary_on(self):
        self._assert_key(
            "template.cache.foo.3670b349b5124aa56bdb50678b02b23a",
            "foo",
            ["x" * 10000],
        )
|
TestMakeTemplateFragmentKey
|
python
|
gevent__gevent
|
src/gevent/pywsgi.py
|
{
"start": 56929,
"end": 60224
}
|
class ____(Environ):
    """
    An environment that does not print its keys and values
    by default.
    Provisional API.
    This is intended to keep potentially sensitive information like
    HTTP authorization and cookies from being inadvertently printed
    or logged.
    For debugging, each instance can have its *secure_repr* attribute
    set to ``False``, which will cause it to print like a normal dict.
    When *secure_repr* is ``True`` (the default), then the value of
    the *whitelist_keys* attribute is consulted; if this value is
    true-ish, it should be a container (something that responds to
    ``in``) of key names (typically a list or set). Keys and values in
    this dictionary that are in *whitelist_keys* will then be printed,
    while all other values will be masked. These values may be
    customized on the class by setting the *default_secure_repr* and
    *default_whitelist_keys*, respectively::
    >>> environ = SecureEnviron(key='value')
    >>> environ # doctest: +ELLIPSIS
    <pywsgi.SecureEnviron dict (keys: 1) at ...
    If we whitelist the key, it gets printed::
    >>> environ.whitelist_keys = {'key'}
    >>> environ
    {'key': 'value'}
    A non-whitelisted key (*only*, to avoid doctest issues) is masked::
    >>> environ['secure'] = 'secret'; del environ['key']
    >>> environ
    {'secure': '<MASKED>'}
    We can turn it off entirely for the instance::
    >>> environ.secure_repr = False
    >>> environ
    {'secure': 'secret'}
    We can also customize it at the class level (here we use a new
    class to be explicit and to avoid polluting the true default
    values; we would set this class to be the ``environ_class`` of the
    server)::
    >>> class MyEnviron(SecureEnviron):
    ...    default_whitelist_keys = ('key',)
    ...
    >>> environ = MyEnviron({'key': 'value'})
    >>> environ
    {'key': 'value'}
    .. versionadded:: 1.2a1
    """

    # Class-level fallbacks; per-instance overrides live in __slots__.
    default_secure_repr = True
    default_whitelist_keys = ()
    default_print_masked_keys = True

    # Allow instances to override the class values,
    # but inherit from the class if not present. Keeps instances
    # small since we can't combine __slots__ with class attributes
    # of the same name.
    __slots__ = ('secure_repr', 'whitelist_keys', 'print_masked_keys')

    def __getattr__(self, name):
        # Only reached when the slot was never assigned on the instance:
        # fall back to the corresponding class-level `default_*` value.
        if name in SecureEnviron.__slots__:
            return getattr(type(self), 'default_' + name)
        raise AttributeError(name)

    def __repr__(self):
        if self.secure_repr:
            whitelist = self.whitelist_keys
            print_masked = self.print_masked_keys
            if whitelist:
                # Show whitelisted values verbatim; mask the rest (or omit
                # them entirely when print_masked_keys is false, noting the
                # count of hidden keys instead).
                safe = {k: self[k] if k in whitelist else "<MASKED>"
                        for k in self
                        if k in whitelist or print_masked}
                safe_repr = repr(safe)
                if not print_masked and len(safe) != len(self):
                    safe_repr = safe_repr[:-1] + ", (hidden keys: %d)}" % (len(self) - len(safe))
                return safe_repr
            # No whitelist configured: print only a summary, never contents.
            return "<pywsgi.SecureEnviron dict (keys: %d) at %s>" % (len(self), id(self))
        return Environ.__repr__(self)

    __str__ = __repr__
|
SecureEnviron
|
python
|
apache__airflow
|
providers/elasticsearch/src/airflow/providers/elasticsearch/log/es_response.py
|
{
"start": 2871,
"end": 3430
}
|
class ____(AttributeDict):
    """
    The HitMeta class is used to manage and access metadata of a document.
    This class inherits from the AttributeDict class and provides
    attribute-like access to its elements.
    """

    def __init__(self, document, exclude=("_source", "_fields")):
        meta = {}
        for key, value in document.items():
            if key in exclude:
                continue
            # Strip the single leading underscore from ES meta keys
            # (e.g. "_id" -> "id").
            meta[key[1:] if key.startswith("_") else key] = value
        if "type" in meta:
            # make sure we are consistent everywhere in python
            meta["doc_type"] = meta.pop("type")
        super().__init__(meta)
|
HitMeta
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_13/tasks.py
|
{
"start": 44950,
"end": 71479
}
|
class ____(NonStrictDataModel):
    """
    :param id: Task id
    :type id: str
    :param name: Task Name
    :type name: str
    :param user: Associated user id
    :type user: str
    :param company: Company ID
    :type company: str
    :param type: Type of task. Values: 'training', 'testing'
    :type type: TaskTypeEnum
    :param status:
    :type status: TaskStatusEnum
    :param comment: Free text comment
    :type comment: str
    :param created: Task creation time (UTC)
    :type created: datetime.datetime
    :param started: Task start time (UTC)
    :type started: datetime.datetime
    :param completed: Task end time (UTC)
    :type completed: datetime.datetime
    :param active_duration: Task duration time (seconds)
    :type active_duration: int
    :param parent: Parent task id
    :type parent: str
    :param project: Project ID of the project to which this task is assigned
    :type project: str
    :param output: Task output params
    :type output: Output
    :param execution: Task execution params
    :type execution: Execution
    :param container: Docker container parameters
    :type container: dict
    :param models: Task models
    :type models: TaskModels
    :param script: Script info
    :type script: Script
    :param tags: User-defined tags list
    :type tags: Sequence[str]
    :param system_tags: System tags list. This field is reserved for system use,
        please don't use it.
    :type system_tags: Sequence[str]
    :param status_changed: Last status change time
    :type status_changed: datetime.datetime
    :param status_message: free text string representing info about the status
    :type status_message: str
    :param status_reason: Reason for last status change
    :type status_reason: str
    :param published: Last status change time
    :type published: datetime.datetime
    :param last_worker: ID of last worker that handled the task
    :type last_worker: str
    :param last_worker_report: Last time a worker reported while working on this
        task
    :type last_worker_report: datetime.datetime
    :param last_update: Last time this task was created, edited, changed or events
        for this task were reported
    :type last_update: datetime.datetime
    :param last_change: Last time any update was done to the task
    :type last_change: datetime.datetime
    :param last_iteration: Last iteration reported for this task
    :type last_iteration: int
    :param last_metrics: Last metric variants (hash to events), one for each metric
        hash
    :type last_metrics: dict
    :param hyperparams: Task hyper params per section
    :type hyperparams: dict
    :param configuration: Task configuration params
    :type configuration: dict
    :param runtime: Task runtime mapping
    :type runtime: dict
    """

    # JSON schema used by the data-model machinery to validate instances.
    _schema = {
        "properties": {
            "active_duration": {
                "description": "Task duration time (seconds)",
                "type": ["integer", "null"],
            },
            "comment": {"description": "Free text comment", "type": ["string", "null"]},
            "company": {"description": "Company ID", "type": ["string", "null"]},
            "completed": {
                "description": "Task end time (UTC)",
                "format": "date-time",
                "type": ["string", "null"],
            },
            "configuration": {
                "additionalProperties": {"$ref": "#/definitions/configuration_item"},
                "description": "Task configuration params",
                "type": ["object", "null"],
            },
            "container": {
                "type": "object",
                "description": "Docker container parameters",
                "additionalProperties": {"type": ["string", "null"]},
            },
            "created": {
                "description": "Task creation time (UTC) ",
                "format": "date-time",
                "type": ["string", "null"],
            },
            "execution": {
                "description": "Task execution params",
                "oneOf": [{"$ref": "#/definitions/execution"}, {"type": "null"}],
            },
            "hyperparams": {
                "additionalProperties": {"$ref": "#/definitions/section_params"},
                "description": "Task hyper params per section",
                "type": ["object", "null"],
            },
            "id": {"description": "Task id", "type": ["string", "null"]},
            "last_change": {
                "description": "Last time any update was done to the task",
                "format": "date-time",
                "type": ["string", "null"],
            },
            "last_iteration": {
                "description": "Last iteration reported for this task",
                "type": ["integer", "null"],
            },
            "last_metrics": {
                "additionalProperties": {"$ref": "#/definitions/last_metrics_variants"},
                "description": "Last metric variants (hash to events), one for each metric hash",
                "type": ["object", "null"],
            },
            "last_update": {
                "description": "Last time this task was created, edited, changed or events for this task were reported",
                "format": "date-time",
                "type": ["string", "null"],
            },
            "last_worker": {
                "description": "ID of last worker that handled the task",
                "type": ["string", "null"],
            },
            "last_worker_report": {
                "description": "Last time a worker reported while working on this task",
                "format": "date-time",
                "type": ["string", "null"],
            },
            "models": {
                "description": "Task models",
                "oneOf": [{"$ref": "#/definitions/task_models"}, {"type": "null"}],
            },
            "name": {"description": "Task Name", "type": ["string", "null"]},
            "output": {
                "description": "Task output params",
                "oneOf": [{"$ref": "#/definitions/output"}, {"type": "null"}],
            },
            "parent": {"description": "Parent task id", "type": ["string", "null"]},
            "project": {
                "description": "Project ID of the project to which this task is assigned",
                "type": ["string", "null"],
            },
            "published": {
                "description": "Last status change time",
                "format": "date-time",
                "type": ["string", "null"],
            },
            "runtime": {
                "description": "Task runtime mapping",
                "type": ["object", "null"],
                "additionalProperties": True,
            },
            "script": {
                "description": "Script info",
                "oneOf": [{"$ref": "#/definitions/script"}, {"type": "null"}],
            },
            "started": {
                "description": "Task start time (UTC)",
                "format": "date-time",
                "type": ["string", "null"],
            },
            "status": {
                "description": "",
                "oneOf": [{"$ref": "#/definitions/task_status_enum"}, {"type": "null"}],
            },
            "status_changed": {
                "description": "Last status change time",
                "format": "date-time",
                "type": ["string", "null"],
            },
            "status_message": {
                "description": "free text string representing info about the status",
                "type": ["string", "null"],
            },
            "status_reason": {
                "description": "Reason for last status change",
                "type": ["string", "null"],
            },
            "system_tags": {
                "description": "System tags list. This field is reserved for system use, please don't use it.",
                "items": {"type": "string"},
                "type": ["array", "null"],
            },
            "tags": {
                "description": "User-defined tags list",
                "items": {"type": "string"},
                "type": ["array", "null"],
            },
            "type": {
                "description": "Type of task. Values: 'training', 'testing'",
                "oneOf": [{"$ref": "#/definitions/task_type_enum"}, {"type": "null"}],
            },
            "user": {"description": "Associated user id", "type": ["string", "null"]},
        },
        "type": "object",
    }

    def __init__(
        self,
        id: Optional[str] = None,
        name: Optional[str] = None,
        user: Optional[str] = None,
        company: Optional[str] = None,
        type: Any = None,
        status: Any = None,
        comment: Optional[str] = None,
        created: Optional[str] = None,
        started: Optional[str] = None,
        completed: Optional[str] = None,
        active_duration: Optional[int] = None,
        parent: Optional[str] = None,
        project: Optional[str] = None,
        output: Any = None,
        execution: Any = None,
        container: Optional[dict] = None,
        models: Any = None,
        script: Any = None,
        tags: Optional[List[str]] = None,
        system_tags: Optional[List[str]] = None,
        status_changed: Optional[str] = None,
        status_message: Optional[str] = None,
        status_reason: Optional[str] = None,
        published: Optional[str] = None,
        last_worker: Optional[str] = None,
        last_worker_report: Optional[str] = None,
        last_update: Optional[str] = None,
        last_change: Optional[str] = None,
        last_iteration: Optional[int] = None,
        last_metrics: Optional[dict] = None,
        hyperparams: Optional[dict] = None,
        configuration: Optional[dict] = None,
        runtime: Optional[dict] = None,
        **kwargs: Any
    ) -> None:
        # BUG FIX: the original called `super(Task, self).__init__(...)`, but
        # no name `Task` exists in this file (the class is named differently),
        # which raised NameError on construction. Zero-argument super() is
        # the correct Python 3 form and is robust to class renames.
        super().__init__(**kwargs)
        # Each assignment below routes through the corresponding property
        # setter, which validates and coerces the value.
        self.id = id
        self.name = name
        self.user = user
        self.company = company
        self.type = type
        self.status = status
        self.comment = comment
        self.created = created
        self.started = started
        self.completed = completed
        self.active_duration = active_duration
        self.parent = parent
        self.project = project
        self.output = output
        self.execution = execution
        self.container = container
        self.models = models
        self.script = script
        self.tags = tags
        self.system_tags = system_tags
        self.status_changed = status_changed
        self.status_message = status_message
        self.status_reason = status_reason
        self.published = published
        self.last_worker = last_worker
        self.last_worker_report = last_worker_report
        self.last_update = last_update
        self.last_change = last_change
        self.last_iteration = last_iteration
        self.last_metrics = last_metrics
        self.hyperparams = hyperparams
        self.configuration = configuration
        self.runtime = runtime

    @schema_property("id")
    def id(self) -> Optional[str]:
        return self._property_id

    @id.setter
    def id(self, value: Optional[str]) -> None:
        if value is None:
            self._property_id = None
            return
        self.assert_isinstance(value, "id", six.string_types)
        self._property_id = value

    @schema_property("name")
    def name(self) -> Optional[str]:
        return self._property_name

    @name.setter
    def name(self, value: Optional[str]) -> None:
        if value is None:
            self._property_name = None
            return
        self.assert_isinstance(value, "name", six.string_types)
        self._property_name = value

    @schema_property("user")
    def user(self) -> Optional[str]:
        return self._property_user

    @user.setter
    def user(self, value: Optional[str]) -> None:
        if value is None:
            self._property_user = None
            return
        self.assert_isinstance(value, "user", six.string_types)
        self._property_user = value

    @schema_property("company")
    def company(self) -> Optional[str]:
        return self._property_company

    @company.setter
    def company(self, value: Optional[str]) -> None:
        if value is None:
            self._property_company = None
            return
        self.assert_isinstance(value, "company", six.string_types)
        self._property_company = value

    @schema_property("type")
    def type(self) -> Any:
        return self._property_type

    @type.setter
    def type(self, value: Any) -> None:
        # Enum-valued field: strings are coerced to the enum when possible;
        # anything else must already be an enum member.
        if value is None:
            self._property_type = None
            return
        if isinstance(value, six.string_types):
            try:
                value = TaskTypeEnum(value)
            except ValueError:
                pass
        else:
            self.assert_isinstance(value, "type", enum.Enum)
        self._property_type = value

    @schema_property("status")
    def status(self) -> Any:
        return self._property_status

    @status.setter
    def status(self, value: Any) -> None:
        if value is None:
            self._property_status = None
            return
        if isinstance(value, six.string_types):
            try:
                value = TaskStatusEnum(value)
            except ValueError:
                pass
        else:
            self.assert_isinstance(value, "status", enum.Enum)
        self._property_status = value

    @schema_property("comment")
    def comment(self) -> Optional[str]:
        return self._property_comment

    @comment.setter
    def comment(self, value: Optional[str]) -> None:
        if value is None:
            self._property_comment = None
            return
        self.assert_isinstance(value, "comment", six.string_types)
        self._property_comment = value

    @schema_property("created")
    def created(self) -> Optional[str]:
        return self._property_created

    @created.setter
    def created(self, value: Optional[str]) -> None:
        # Datetime-valued field: ISO strings are parsed into datetime.
        if value is None:
            self._property_created = None
            return
        self.assert_isinstance(value, "created", six.string_types + (datetime,))
        if not isinstance(value, datetime):
            value = parse_datetime(value)
        self._property_created = value

    @schema_property("started")
    def started(self) -> Optional[str]:
        return self._property_started

    @started.setter
    def started(self, value: Optional[str]) -> None:
        if value is None:
            self._property_started = None
            return
        self.assert_isinstance(value, "started", six.string_types + (datetime,))
        if not isinstance(value, datetime):
            value = parse_datetime(value)
        self._property_started = value

    @schema_property("completed")
    def completed(self) -> Optional[str]:
        return self._property_completed

    @completed.setter
    def completed(self, value: Optional[str]) -> None:
        if value is None:
            self._property_completed = None
            return
        self.assert_isinstance(value, "completed", six.string_types + (datetime,))
        if not isinstance(value, datetime):
            value = parse_datetime(value)
        self._property_completed = value

    @schema_property("active_duration")
    def active_duration(self) -> Optional[int]:
        return self._property_active_duration

    @active_duration.setter
    def active_duration(self, value: Optional[int]) -> None:
        if value is None:
            self._property_active_duration = None
            return
        # Accept whole-number floats (e.g. from JSON) as integers.
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "active_duration", six.integer_types)
        self._property_active_duration = value

    @schema_property("parent")
    def parent(self) -> Optional[str]:
        return self._property_parent

    @parent.setter
    def parent(self, value: Optional[str]) -> None:
        if value is None:
            self._property_parent = None
            return
        self.assert_isinstance(value, "parent", six.string_types)
        self._property_parent = value

    @schema_property("project")
    def project(self) -> Optional[str]:
        return self._property_project

    @project.setter
    def project(self, value: Optional[str]) -> None:
        if value is None:
            self._property_project = None
            return
        self.assert_isinstance(value, "project", six.string_types)
        self._property_project = value

    @schema_property("output")
    def output(self) -> Any:
        return self._property_output

    @output.setter
    def output(self, value: Any) -> None:
        # Model-valued field: dicts are deserialized into the model class.
        if value is None:
            self._property_output = None
            return
        if isinstance(value, dict):
            value = Output.from_dict(value)
        else:
            self.assert_isinstance(value, "output", Output)
        self._property_output = value

    @schema_property("execution")
    def execution(self) -> Any:
        return self._property_execution

    @execution.setter
    def execution(self, value: Any) -> None:
        if value is None:
            self._property_execution = None
            return
        if isinstance(value, dict):
            value = Execution.from_dict(value)
        else:
            self.assert_isinstance(value, "execution", Execution)
        self._property_execution = value

    @schema_property("container")
    def container(self) -> Optional[dict]:
        return self._property_container

    @container.setter
    def container(self, value: Optional[dict]) -> None:
        if value is None:
            self._property_container = None
            return
        self.assert_isinstance(value, "container", dict)
        self._property_container = value

    @schema_property("models")
    def models(self) -> Any:
        return self._property_models

    @models.setter
    def models(self, value: Any) -> None:
        if value is None:
            self._property_models = None
            return
        if isinstance(value, dict):
            value = TaskModels.from_dict(value)
        else:
            self.assert_isinstance(value, "models", TaskModels)
        self._property_models = value

    @schema_property("script")
    def script(self) -> Any:
        return self._property_script

    @script.setter
    def script(self, value: Any) -> None:
        if value is None:
            self._property_script = None
            return
        if isinstance(value, dict):
            value = Script.from_dict(value)
        else:
            self.assert_isinstance(value, "script", Script)
        self._property_script = value

    @schema_property("tags")
    def tags(self) -> Optional[List[str]]:
        return self._property_tags

    @tags.setter
    def tags(self, value: Optional[List[str]]) -> None:
        if value is None:
            self._property_tags = None
            return
        self.assert_isinstance(value, "tags", (list, tuple))
        self.assert_isinstance(value, "tags", six.string_types, is_array=True)
        self._property_tags = value

    @schema_property("system_tags")
    def system_tags(self) -> Optional[List[str]]:
        return self._property_system_tags

    @system_tags.setter
    def system_tags(self, value: Optional[List[str]]) -> None:
        if value is None:
            self._property_system_tags = None
            return
        self.assert_isinstance(value, "system_tags", (list, tuple))
        self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
        self._property_system_tags = value

    @schema_property("status_changed")
    def status_changed(self) -> Optional[str]:
        return self._property_status_changed

    @status_changed.setter
    def status_changed(self, value: Optional[str]) -> None:
        if value is None:
            self._property_status_changed = None
            return
        self.assert_isinstance(value, "status_changed", six.string_types + (datetime,))
        if not isinstance(value, datetime):
            value = parse_datetime(value)
        self._property_status_changed = value

    @schema_property("status_message")
    def status_message(self) -> Optional[str]:
        return self._property_status_message

    @status_message.setter
    def status_message(self, value: Optional[str]) -> None:
        if value is None:
            self._property_status_message = None
            return
        self.assert_isinstance(value, "status_message", six.string_types)
        self._property_status_message = value

    @schema_property("status_reason")
    def status_reason(self) -> Optional[str]:
        return self._property_status_reason

    @status_reason.setter
    def status_reason(self, value: Optional[str]) -> None:
        if value is None:
            self._property_status_reason = None
            return
        self.assert_isinstance(value, "status_reason", six.string_types)
        self._property_status_reason = value

    @schema_property("published")
    def published(self) -> Optional[str]:
        return self._property_published

    @published.setter
    def published(self, value: Optional[str]) -> None:
        if value is None:
            self._property_published = None
            return
        self.assert_isinstance(value, "published", six.string_types + (datetime,))
        if not isinstance(value, datetime):
            value = parse_datetime(value)
        self._property_published = value

    @schema_property("last_worker")
    def last_worker(self) -> Optional[str]:
        return self._property_last_worker

    @last_worker.setter
    def last_worker(self, value: Optional[str]) -> None:
        if value is None:
            self._property_last_worker = None
            return
        self.assert_isinstance(value, "last_worker", six.string_types)
        self._property_last_worker = value

    @schema_property("last_worker_report")
    def last_worker_report(self) -> Optional[str]:
        return self._property_last_worker_report

    @last_worker_report.setter
    def last_worker_report(self, value: Optional[str]) -> None:
        if value is None:
            self._property_last_worker_report = None
            return
        self.assert_isinstance(value, "last_worker_report", six.string_types + (datetime,))
        if not isinstance(value, datetime):
            value = parse_datetime(value)
        self._property_last_worker_report = value

    @schema_property("last_update")
    def last_update(self) -> Optional[str]:
        return self._property_last_update

    @last_update.setter
    def last_update(self, value: Optional[str]) -> None:
        if value is None:
            self._property_last_update = None
            return
        self.assert_isinstance(value, "last_update", six.string_types + (datetime,))
        if not isinstance(value, datetime):
            value = parse_datetime(value)
        self._property_last_update = value

    @schema_property("last_change")
    def last_change(self) -> Optional[str]:
        return self._property_last_change

    @last_change.setter
    def last_change(self, value: Optional[str]) -> None:
        if value is None:
            self._property_last_change = None
            return
        self.assert_isinstance(value, "last_change", six.string_types + (datetime,))
        if not isinstance(value, datetime):
            value = parse_datetime(value)
        self._property_last_change = value

    @schema_property("last_iteration")
    def last_iteration(self) -> Optional[int]:
        return self._property_last_iteration

    @last_iteration.setter
    def last_iteration(self, value: Optional[int]) -> None:
        if value is None:
            self._property_last_iteration = None
            return
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "last_iteration", six.integer_types)
        self._property_last_iteration = value

    @schema_property("last_metrics")
    def last_metrics(self) -> Optional[dict]:
        return self._property_last_metrics

    @last_metrics.setter
    def last_metrics(self, value: Optional[dict]) -> None:
        if value is None:
            self._property_last_metrics = None
            return
        self.assert_isinstance(value, "last_metrics", (dict,))
        self._property_last_metrics = value

    @schema_property("hyperparams")
    def hyperparams(self) -> Optional[dict]:
        return self._property_hyperparams

    @hyperparams.setter
    def hyperparams(self, value: Optional[dict]) -> None:
        if value is None:
            self._property_hyperparams = None
            return
        self.assert_isinstance(value, "hyperparams", dict)
        self.assert_isinstance(value.keys(), "hyperparams_keys", six.string_types, is_array=True)
        self.assert_isinstance(value.values(), "hyperparams_values", (SectionParams, dict), is_array=True)
        # Deserialize plain-dict values into SectionParams models.
        value = dict(((k, SectionParams(**v) if isinstance(v, dict) else v) for (k, v) in value.items()))
        self._property_hyperparams = value

    @schema_property("configuration")
    def configuration(self) -> Optional[dict]:
        return self._property_configuration

    @configuration.setter
    def configuration(self, value: Optional[dict]) -> None:
        if value is None:
            self._property_configuration = None
            return
        self.assert_isinstance(value, "configuration", dict)
        self.assert_isinstance(value.keys(), "configuration_keys", six.string_types, is_array=True)
        self.assert_isinstance(
            value.values(),
            "configuration_values",
            (ConfigurationItem, dict),
            is_array=True,
        )
        value = dict(((k, ConfigurationItem(**v) if isinstance(v, dict) else v) for (k, v) in value.items()))
        self._property_configuration = value

    @schema_property("runtime")
    def runtime(self) -> Optional[dict]:
        return self._property_runtime

    @runtime.setter
    def runtime(self, value: Optional[dict]) -> None:
        if value is None:
            self._property_runtime = None
            return
        self.assert_isinstance(value, "runtime", dict)
        self._property_runtime = value
|
Task
|
python
|
getsentry__sentry
|
src/sentry/issue_detection/detectors/http_overhead_detector.py
|
{
"start": 734,
"end": 877
}
|
class ____:
"""
Keep span data that will be used to store the problem together.
"""
span: Span
delay: float
|
ProblemIndicator
|
python
|
openai__openai-python
|
src/openai/types/vector_stores/vector_store_file.py
|
{
"start": 547,
"end": 2261
}
|
class ____(BaseModel):
id: str
"""The identifier, which can be referenced in API endpoints."""
created_at: int
"""The Unix timestamp (in seconds) for when the vector store file was created."""
last_error: Optional[LastError] = None
"""The last error associated with this vector store file.
Will be `null` if there are no errors.
"""
object: Literal["vector_store.file"]
"""The object type, which is always `vector_store.file`."""
status: Literal["in_progress", "completed", "cancelled", "failed"]
"""
The status of the vector store file, which can be either `in_progress`,
`completed`, `cancelled`, or `failed`. The status `completed` indicates that the
vector store file is ready for use.
"""
usage_bytes: int
"""The total vector store usage in bytes.
Note that this may be different from the original file size.
"""
vector_store_id: str
"""
The ID of the
[vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
that the [File](https://platform.openai.com/docs/api-reference/files) is
attached to.
"""
attributes: Optional[Dict[str, Union[str, float, bool]]] = None
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
structured format, and querying for objects via API or the dashboard. Keys are
strings with a maximum length of 64 characters. Values are strings with a
maximum length of 512 characters, booleans, or numbers.
"""
chunking_strategy: Optional[FileChunkingStrategy] = None
"""The strategy used to chunk the file."""
|
VectorStoreFile
|
python
|
EpistasisLab__tpot
|
tpot/builtin_modules/passkbinsdiscretizer.py
|
{
"start": 2385,
"end": 3982
}
|
class ____(TransformerMixin, BaseEstimator ):
def __init__(self, n_bins=5, encode='onehot-dense', strategy='quantile', subsample=None, random_state=None):
self.n_bins = n_bins
self.encode = encode
self.strategy = strategy
self.subsample = subsample
self.random_state = random_state
"""
Same as sklearn.preprocessing.KBinsDiscretizer, but passes through columns that are not discretized due to having fewer than n_bins unique values instead of ignoring them.
See sklearn.preprocessing.KBinsDiscretizer for more information.
"""
def fit(self, X, y=None):
# Identify columns with more than n unique values
# Create a ColumnTransformer to select and discretize the chosen columns
self.selected_columns_ = select_features(X, min_unique=10)
if isinstance(X, pd.DataFrame):
self.not_selected_columns_ = [col for col in X.columns if col not in self.selected_columns_]
else:
self.not_selected_columns_ = [i for i in range(X.shape[1]) if i not in self.selected_columns_]
enc = KBinsDiscretizer(n_bins=self.n_bins, encode=self.encode, strategy=self.strategy, subsample=self.subsample, random_state=self.random_state)
self.transformer = ColumnTransformer([
('discretizer', enc, self.selected_columns_),
('passthrough', 'passthrough', self.not_selected_columns_)
])
self.transformer.fit(X)
return self
def transform(self, X):
return self.transformer.transform(X)
|
PassKBinsDiscretizer
|
python
|
simonw__datasette
|
tests/plugins/my_plugin.py
|
{
"start": 5166,
"end": 17933
}
|
class ____(Facet):
type = "dummy"
async def suggest(self):
columns = await self.get_columns(self.sql, self.params)
return (
[
{
"name": column,
"toggle_url": self.ds.absolute_url(
self.request,
path_with_added_args(self.request, {"_facet_dummy": column}),
),
"type": "dummy",
}
for column in columns
]
if self.request.args.get("_dummy_facet")
else []
)
async def facet_results(self):
facet_results = {}
facets_timed_out = []
return facet_results, facets_timed_out
@hookimpl
def actor_from_request(datasette, request):
if request.args.get("_bot"):
return {"id": "bot"}
else:
return None
@hookimpl
def asgi_wrapper():
def wrap(app):
async def maybe_set_actor_in_scope(scope, receive, send):
if b"_actor_in_scope" in scope.get("query_string", b""):
scope = dict(scope, actor={"id": "from-scope"})
print(scope)
await app(scope, receive, send)
return maybe_set_actor_in_scope
return wrap
@hookimpl
def register_routes():
async def one(datasette):
return Response.text(
(await datasette.get_database().execute("select 1 + 1")).first()[0]
)
async def two(request):
name = request.url_vars["name"]
greeting = request.args.get("greeting")
return Response.text(f"{greeting} {name}")
async def three(scope, send):
await asgi_send_json(
send, {"hello": "world"}, status=200, headers={"x-three": "1"}
)
async def post(request):
if request.method == "GET":
return Response.html(request.scope["csrftoken"]())
else:
return Response.json(await request.post_vars())
async def csrftoken_form(request, datasette):
return Response.html(
await datasette.render_template("csrftoken_form.html", request=request)
)
def not_async():
return Response.html("This was not async")
def add_message(datasette, request):
datasette.add_message(request, "Hello from messages")
return Response.html("Added message")
async def render_message(datasette, request):
return Response.html(
await datasette.render_template("render_message.html", request=request)
)
def login_as_root(datasette, request):
# Mainly for the latest.datasette.io demo
if request.method == "POST":
response = Response.redirect("/")
datasette.set_actor_cookie(response, {"id": "root"})
return response
return Response.html(
"""
<form action="{}" method="POST">
<p>
<input type="hidden" name="csrftoken" value="{}">
<input type="submit"
value="Sign in as root user"
style="font-size: 2em; padding: 0.1em 0.5em;">
</p>
</form>
""".format(
request.path, request.scope["csrftoken"]()
)
)
def asgi_scope(scope):
return Response.json(scope, default=repr)
async def parallel_queries(datasette):
db = datasette.get_database()
with tracer.trace_child_tasks():
one, two = await asyncio.gather(
db.execute("select coalesce(sleep(0.1), 1)"),
db.execute("select coalesce(sleep(0.1), 2)"),
)
return Response.json({"one": one.single_value(), "two": two.single_value()})
return [
(r"/one/$", one),
(r"/two/(?P<name>.*)$", two),
(r"/three/$", three),
(r"/post/$", post),
(r"/csrftoken-form/$", csrftoken_form),
(r"/login-as-root$", login_as_root),
(r"/not-async/$", not_async),
(r"/add-message/$", add_message),
(r"/render-message/$", render_message),
(r"/asgi-scope$", asgi_scope),
(r"/parallel-queries$", parallel_queries),
]
@hookimpl
def startup(datasette):
datasette._startup_hook_fired = True
# And test some import shortcuts too
from datasette import Response
from datasette import Forbidden
from datasette import NotFound
from datasette import hookimpl
from datasette import actor_matches_allow
_ = (Response, Forbidden, NotFound, hookimpl, actor_matches_allow)
@hookimpl
def canned_queries(datasette, database, actor):
return {"from_hook": f"select 1, '{actor['id'] if actor else 'null'}' as actor_id"}
@hookimpl
def register_magic_parameters():
from uuid import uuid4
def uuid(key, request):
if key == "new":
return str(uuid4())
else:
raise KeyError
def request(key, request):
if key == "http_version":
return request.scope["http_version"]
else:
raise KeyError
async def asyncrequest(key, request):
return key
return [
("request", request),
("uuid", uuid),
("asyncrequest", asyncrequest),
]
@hookimpl
def forbidden(datasette, request, message):
datasette._last_forbidden_message = message
if request.path == "/data2":
return Response.redirect("/login?message=" + message)
@hookimpl
def menu_links(datasette, actor, request):
if actor:
label = "Hello"
if request.args.get("_hello"):
label += ", " + request.args["_hello"]
return [{"href": datasette.urls.instance(), "label": label}]
@hookimpl
def table_actions(datasette, database, table, actor):
if actor:
return [
{
"href": datasette.urls.instance(),
"label": f"Database: {database}",
},
{"href": datasette.urls.instance(), "label": f"Table: {table}"},
]
@hookimpl
def view_actions(datasette, database, view, actor):
if actor:
return [
{
"href": datasette.urls.instance(),
"label": f"Database: {database}",
},
{"href": datasette.urls.instance(), "label": f"View: {view}"},
]
@hookimpl
def query_actions(datasette, database, query_name, sql):
# Don't explain an explain
if sql.lower().startswith("explain"):
return
return [
{
"href": datasette.urls.database(database)
+ "/-/query"
+ "?"
+ urllib.parse.urlencode(
{
"sql": "explain " + sql,
}
),
"label": "Explain this query",
"description": "Runs a SQLite explain",
},
]
@hookimpl
def row_actions(datasette, database, table, actor, row):
if actor:
return [
{
"href": datasette.urls.instance(),
"label": f"Row details for {actor['id']}",
"description": json.dumps(dict(row), default=repr),
},
]
@hookimpl
def database_actions(datasette, database, actor, request):
if actor:
label = f"Database: {database}"
if request.args.get("_hello"):
label += " - " + request.args["_hello"]
return [
{
"href": datasette.urls.instance(),
"label": label,
}
]
@hookimpl
def homepage_actions(datasette, actor, request):
if actor:
label = f"Custom homepage for: {actor['id']}"
return [
{
"href": datasette.urls.path("/-/custom-homepage"),
"label": label,
}
]
@hookimpl
def skip_csrf(scope):
return scope["path"] == "/skip-csrf"
@hookimpl
def register_actions(datasette):
extras_old = datasette.plugin_config("datasette-register-permissions") or {}
extras_new = datasette.plugin_config("datasette-register-actions") or {}
actions = [
Action(
name="action-from-plugin",
abbr="ap",
description="New action added by a plugin",
resource_class=DatabaseResource,
),
Action(
name="view-collection",
abbr="vc",
description="View a collection",
resource_class=DatabaseResource,
),
# Test actions for test_hook_custom_allowed (global actions - no resource_class)
Action(
name="this_is_allowed",
abbr=None,
description=None,
),
Action(
name="this_is_denied",
abbr=None,
description=None,
),
Action(
name="this_is_allowed_async",
abbr=None,
description=None,
),
Action(
name="this_is_denied_async",
abbr=None,
description=None,
),
]
# Support old-style config for backwards compatibility
if extras_old:
for p in extras_old["permissions"]:
# Map old takes_database/takes_resource to new global/resource_class
if p.get("takes_database"):
# Has database -> DatabaseResource
actions.append(
Action(
name=p["name"],
abbr=p["abbr"],
description=p["description"],
resource_class=DatabaseResource,
)
)
else:
# No database -> global action (no resource_class)
actions.append(
Action(
name=p["name"],
abbr=p["abbr"],
description=p["description"],
)
)
# Support new-style config
if extras_new:
for a in extras_new["actions"]:
# Check if this is a global action (no resource_class specified)
if not a.get("resource_class"):
actions.append(
Action(
name=a["name"],
abbr=a["abbr"],
description=a["description"],
)
)
else:
# Map string resource_class to actual class
resource_class_map = {
"DatabaseResource": DatabaseResource,
}
resource_class = resource_class_map.get(
a.get("resource_class", "DatabaseResource"), DatabaseResource
)
actions.append(
Action(
name=a["name"],
abbr=a["abbr"],
description=a["description"],
resource_class=resource_class,
)
)
return actions
@hookimpl
def permission_resources_sql(datasette, actor, action):
from datasette.permissions import PermissionSQL
# Handle test actions used in test_hook_custom_allowed
if action == "this_is_allowed":
return PermissionSQL.allow(reason="test plugin allows this_is_allowed")
elif action == "this_is_denied":
return PermissionSQL.deny(reason="test plugin denies this_is_denied")
elif action == "this_is_allowed_async":
return PermissionSQL.allow(reason="test plugin allows this_is_allowed_async")
elif action == "this_is_denied_async":
return PermissionSQL.deny(reason="test plugin denies this_is_denied_async")
elif action == "view-database-download":
# Return rule based on actor's can_download permission
if actor and actor.get("can_download"):
return PermissionSQL.allow(reason="actor has can_download")
else:
return None # No opinion
elif action == "view-database":
# Also grant view-database if actor has can_download (needed for download to work)
if actor and actor.get("can_download"):
return PermissionSQL.allow(
reason="actor has can_download, grants view-database"
)
else:
return None
elif action in (
"insert-row",
"create-table",
"drop-table",
"delete-row",
"update-row",
):
# Special permissions for latest.datasette.io demos
actor_id = actor.get("id") if actor else None
if actor_id == "todomvc":
return PermissionSQL.allow(reason=f"todomvc actor allowed for {action}")
return None
|
DummyFacet
|
python
|
pypa__pip
|
src/pip/_vendor/rich/segment.py
|
{
"start": 21880,
"end": 22710
}
|
class ____:
"""A simple renderable to render an iterable of segments. This class may be useful if
you want to print segments outside of a __rich_console__ method.
Args:
segments (Iterable[Segment]): An iterable of segments.
new_lines (bool, optional): Add new lines between segments. Defaults to False.
"""
def __init__(self, segments: Iterable[Segment], new_lines: bool = False) -> None:
self.segments = list(segments)
self.new_lines = new_lines
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
if self.new_lines:
line = Segment.line()
for segment in self.segments:
yield segment
yield line
else:
yield from self.segments
|
Segments
|
python
|
huggingface__transformers
|
src/transformers/models/deberta/modeling_deberta.py
|
{
"start": 32864,
"end": 36467
}
|
class ____(DebertaPreTrainedModel):
_tied_weights_keys = {
"cls.predictions.decoder.bias": "cls.predictions.bias",
"cls.predictions.decoder.weight": "deberta.embeddings.word_embeddings.weight",
}
def __init__(self, config):
super().__init__(config)
self.legacy = config.legacy
self.deberta = DebertaModel(config)
if self.legacy:
self.cls = LegacyDebertaOnlyMLMHead(config)
else:
self._tied_weights_keys = {
"lm_predictions.lm_head.weight": "deberta.embeddings.word_embeddings.weight",
}
self.lm_predictions = DebertaOnlyMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
if self.legacy:
return self.cls.predictions.decoder
else:
return self.lm_predictions.lm_head.dense
def set_output_embeddings(self, new_embeddings):
if self.legacy:
self.cls.predictions.decoder = new_embeddings
self.cls.predictions.bias = new_embeddings.bias
else:
self.lm_predictions.lm_head.dense = new_embeddings
self.lm_predictions.lm_head.bias = new_embeddings.bias
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, MaskedLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.deberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
if self.legacy:
prediction_scores = self.cls(sequence_output)
else:
prediction_scores = self.lm_predictions(sequence_output, self.deberta.embeddings.word_embeddings)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
|
DebertaForMaskedLM
|
python
|
langchain-ai__langchain
|
libs/standard-tests/tests/unit_tests/test_embeddings.py
|
{
"start": 212,
"end": 484
}
|
class ____(EmbeddingsUnitTests):
@property
def embeddings_class(self) -> type[Embeddings]:
return DeterministicFakeEmbedding
@property
def embedding_model_params(self) -> dict:
return {"size": 6} # embedding dimension
|
TestFakeEmbeddingsUnit
|
python
|
streamlit__streamlit
|
lib/streamlit/runtime/scriptrunner/script_runner.py
|
{
"start": 2338,
"end": 5926
}
|
class ____(Enum):
# "Control" events. These are emitted when the ScriptRunner's state changes.
# The script started running.
SCRIPT_STARTED = "SCRIPT_STARTED"
# The script run stopped because of a compile error.
SCRIPT_STOPPED_WITH_COMPILE_ERROR = "SCRIPT_STOPPED_WITH_COMPILE_ERROR"
# The script run stopped because it ran to completion, or was
# interrupted by the user.
SCRIPT_STOPPED_WITH_SUCCESS = "SCRIPT_STOPPED_WITH_SUCCESS"
# The script run stopped in order to start a script run with newer widget state.
SCRIPT_STOPPED_FOR_RERUN = "SCRIPT_STOPPED_FOR_RERUN"
# The script run corresponding to a fragment ran to completion, or was interrupted
# by the user.
FRAGMENT_STOPPED_WITH_SUCCESS = "FRAGMENT_STOPPED_WITH_SUCCESS"
# The ScriptRunner is done processing the ScriptEventQueue and
# is shut down.
SHUTDOWN = "SHUTDOWN"
# "Data" events. These are emitted when the ScriptRunner's script has
# data to send to the frontend.
# The script has a ForwardMsg to send to the frontend.
ENQUEUE_FORWARD_MSG = "ENQUEUE_FORWARD_MSG"
"""
Note [Threading]
There are two kinds of threads in Streamlit, the main thread and script threads.
The main thread is started by invoking the Streamlit CLI, and bootstraps the
framework and runs the Tornado webserver.
A script thread is created by a ScriptRunner when it starts. The script thread
is where the ScriptRunner executes, including running the user script itself,
processing messages to/from the frontend, and all the Streamlit library function
calls in the user script.
It is possible for the user script to spawn its own threads, which could call
Streamlit functions. We restrict the ScriptRunner's execution control to the
script thread. Calling Streamlit functions from other threads is unlikely to
work correctly due to lack of ScriptRunContext, so we may add a guard against
it in the future.
"""
# For projects that have a pages folder, we assume that this is a script that
# is designed to leverage our original v1 version of multi-page apps. This
# function will be called to run the script in lieu of the main script. This
# function simulates the v1 setup using the modern v2 commands (st.navigation)
def _mpa_v1(main_script_path: str) -> None:
from pathlib import Path
from streamlit.commands.navigation import PageType, _navigation
from streamlit.navigation.page import StreamlitPage
# Select the folder that should be used for the pages:
resolved_main_script_path: Final = Path(main_script_path).resolve()
pages_folder: Final = resolved_main_script_path.parent / "pages"
# Read out the my_pages folder and create a page for every script:
pages = sorted(
[
page
for page in pages_folder.glob("*.py")
if page.name.endswith(".py")
and not page.name.startswith(".")
and page.name != "__init__.py"
],
key=page_sort_key,
)
# Use this script as the main page and
main_page = StreamlitPage(resolved_main_script_path, default=True)
all_pages = [main_page] + [
StreamlitPage(pages_folder / page.name) for page in pages
]
# Initialize the navigation with all the pages:
position: Literal["sidebar", "hidden", "top"] = (
"hidden"
if config.get_option("client.showSidebarNavigation") is False
else "sidebar"
)
page = _navigation(
cast("list[PageType]", all_pages),
position=position,
expanded=False,
)
page.run()
|
ScriptRunnerEvent
|
python
|
PyCQA__pylint
|
tests/functional/ext/docparams/parameter/missing_param_doc_required_no_doc_rgx_test_all.py
|
{
"start": 283,
"end": 732
}
|
class ____:
"""test_all_docstring_rgx
Function that matches "check all functions" 'no-docstring-rgx' config option
No error message is emitted.
"""
def __init__(self, my_param: int) -> None:
"""
My init docstring
:param my_param: My first param
"""
# test_fail_empty_docstring_rgx
# Function that matches "check all functions" 'no-docstring-rgx' config option
# An error message is emitted.
|
MyClass
|
python
|
getsentry__sentry
|
tests/snuba/api/endpoints/test_organization_events_vitals.py
|
{
"start": 415,
"end": 9513
}
|
class ____(APITestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.start = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
self.end = self.start + timedelta(hours=6)
self.transaction_data = load_data("transaction", timestamp=self.start)
self.query: dict[str, str | list[str]] = {
"start": self.start.isoformat(),
"end": self.end.isoformat(),
}
self.features: dict[str, bool] = {}
def store_event(self, data, measurements=None, **kwargs):
if measurements:
for vital, value in measurements.items():
data["measurements"][vital]["value"] = value
return super().store_event(
data.copy(),
project_id=self.project.id,
)
def do_request(self, query=None, features=None):
if features is None:
features = {"organizations:discover-basic": True}
features.update(self.features)
if query is None:
query = self.query
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-organization-events-vitals",
kwargs={"organization_id_or_slug": self.organization.slug},
)
with self.feature(features):
return self.client.get(url, query, format="json")
def test_no_projects(self) -> None:
response = self.do_request()
assert response.status_code == 200, response.content
assert len(response.data) == 0
def test_no_vitals(self) -> None:
self.store_event(
self.transaction_data,
project_id=self.project.id,
)
self.query.update({"vital": []})
response = self.do_request()
assert response.status_code == 400, response.content
assert "Need to pass at least one vital" == response.data["detail"]
def test_bad_vital(self) -> None:
self.store_event(
self.transaction_data,
project_id=self.project.id,
)
self.query.update({"vital": ["foobar"]})
response = self.do_request()
assert response.status_code == 400, response.content
assert "foobar is not a valid vital" == response.data["detail"]
def test_simple(self) -> None:
data = self.transaction_data.copy()
for lcp in [2000, 3000, 5000]:
self.store_event(
data,
{"lcp": lcp},
project_id=self.project.id,
)
self.query.update({"vital": ["measurements.lcp"]})
response = self.do_request()
assert response.status_code == 200, response.content
assert not response.data["meta"]["isMetricsData"]
assert response.data["measurements.lcp"] == {
"good": 1,
"meh": 1,
"poor": 1,
"total": 3,
"p75": 4000,
}
def test_simple_with_refining_user_misery_filter(self) -> None:
project1 = self.create_project(organization=self.organization)
project2 = self.create_project(organization=self.organization)
ProjectTransactionThreshold.objects.create(
project=project1,
organization=project1.organization,
threshold=100,
metric=TransactionMetric.LCP.value,
)
ProjectTransactionThreshold.objects.create(
project=project2,
organization=project2.organization,
threshold=1000,
metric=TransactionMetric.LCP.value,
)
data = self.transaction_data.copy()
for project in [project1, project2]:
for lcp in [2000, 3000, 5000]:
self.store_event(
data,
{"lcp": lcp},
project_id=project.id,
)
self.query.update({"vital": ["measurements.lcp"]})
response = self.do_request(features={"organizations:discover-basic": True})
assert response.status_code == 200, response.content
assert not response.data["meta"]["isMetricsData"]
assert response.data["measurements.lcp"] == {
"good": 0,
"meh": 1,
"poor": 1,
"total": 2,
"p75": 4500,
}
self.query.update({"query": "user_misery():<0.04"})
response = self.do_request(features={"organizations:discover-basic": True})
assert response.status_code == 200, response.content
assert len(response.data) == 2
assert not response.data["meta"]["isMetricsData"]
assert response.data["measurements.lcp"] == {
"good": 0,
"meh": 1,
"poor": 1,
"total": 2,
"p75": 4500,
}
def test_grouping(self) -> None:
counts = [
(100, 2),
(3000, 3),
(4500, 1),
]
for duration, count in counts:
for _ in range(count):
self.store_event(
load_data("transaction", timestamp=self.start),
{"lcp": duration},
project_id=self.project.id,
)
self.query.update({"vital": ["measurements.lcp"]})
response = self.do_request()
assert response.status_code == 200
assert not response.data["meta"]["isMetricsData"]
assert response.data["measurements.lcp"] == {
"good": 2,
"meh": 3,
"poor": 1,
"total": 6,
"p75": 3000,
}
def test_multiple_vitals(self) -> None:
vitals = {"lcp": 3000, "fid": 50, "cls": 0.15, "fcp": 5000, "fp": 4000}
self.store_event(
load_data("transaction", timestamp=self.start),
vitals,
project_id=self.project.id,
)
self.query.update(
{
"vital": [
"measurements.lcp",
"measurements.fid",
"measurements.cls",
"measurements.fcp",
"measurements.fp",
]
}
)
response = self.do_request()
assert response.status_code == 200
assert not response.data["meta"]["isMetricsData"]
assert response.data["measurements.lcp"] == {
"good": 0,
"meh": 1,
"poor": 0,
"total": 1,
"p75": 3000,
}
assert response.data["measurements.fid"] == {
"good": 1,
"meh": 0,
"poor": 0,
"total": 1,
"p75": 50,
}
assert response.data["measurements.cls"] == {
"good": 0,
"meh": 1,
"poor": 0,
"total": 1,
"p75": 0.15,
}
assert response.data["measurements.fcp"] == {
"good": 0,
"meh": 0,
"poor": 1,
"total": 1,
"p75": 5000,
}
assert response.data["measurements.fp"] == {
"good": 0,
"meh": 0,
"poor": 1,
"total": 1,
"p75": 4000,
}
def test_transactions_without_vitals(self) -> None:
del self.transaction_data["measurements"]
self.store_event(
self.transaction_data,
project_id=self.project.id,
)
self.query.update({"vital": ["measurements.lcp", "measurements.fcp"]})
response = self.do_request()
assert response.status_code == 200, response.data
assert not response.data["meta"]["isMetricsData"]
assert response.data["measurements.lcp"] == {
"good": 0,
"meh": 0,
"poor": 0,
"total": 0,
"p75": None,
}
assert response.data["measurements.fcp"] == {
"good": 0,
"meh": 0,
"poor": 0,
"total": 0,
"p75": None,
}
def test_edges_of_vital_thresholds(self) -> None:
self.store_event(
load_data("transaction", timestamp=self.start),
{"lcp": 4000, "fp": 1000, "fcp": 0},
project_id=self.project.id,
)
self.query.update({"vital": ["measurements.lcp", "measurements.fp", "measurements.fcp"]})
response = self.do_request()
assert response.status_code == 200, response.data
assert not response.data["meta"]["isMetricsData"]
assert response.data["measurements.lcp"] == {
"good": 0,
"meh": 0,
"poor": 1,
"total": 1,
"p75": 4000,
}
assert response.data["measurements.fp"] == {
"good": 0,
"meh": 1,
"poor": 0,
"total": 1,
"p75": 1000,
}
assert response.data["measurements.fcp"] == {
"good": 1,
"meh": 0,
"poor": 0,
"total": 1,
"p75": 0,
}
|
OrganizationEventsVitalsEndpointTest
|
python
|
ansible__ansible
|
test/units/module_utils/facts/test_collectors.py
|
{
"start": 3462,
"end": 3835
}
|
class ____(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'apparmor']
valid_subsets = ['apparmor']
fact_namespace = 'ansible_apparmor'
collector_class = ApparmorFactCollector
def test_collect(self):
facts_dict = super(TestApparmorFacts, self)._test_collect()
self.assertIn('status', facts_dict['apparmor'])
|
TestApparmorFacts
|
python
|
google__pytype
|
pytype/rewrite/tests/test_basic.py
|
{
"start": 3786,
"end": 4249
}
|
class ____(RewriteTest):
"""Enum tests."""
def test_member(self):
self.Check("""
import enum
class E(enum.Enum):
X = 42
assert_type(E.X, E)
""")
def test_member_pyi(self):
with self.DepTree([('foo.pyi', """
import enum
class E(enum.Enum):
X = 42
""")]):
self.Check("""
import foo
assert_type(foo.E.X, foo.E)
""")
if __name__ == '__main__':
test_base.main()
|
EnumTest
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/where/tutorial011_py310.py
|
{
"start": 76,
"end": 1559
}
|
class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str
secret_name: str
age: int | None = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32)
hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36)
hero_7 = Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.add(hero_4)
session.add(hero_5)
session.add(hero_6)
session.add(hero_7)
session.commit()
def select_heroes():
with Session(engine) as session:
statement = select(Hero).where(col(Hero.age) >= 35)
results = session.exec(statement)
for hero in results:
print(hero)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
|
Hero
|
python
|
langchain-ai__langchain
|
libs/core/langchain_core/runnables/utils.py
|
{
"start": 9638,
"end": 13238
}
|
class ____(ast.NodeVisitor):
"""Get the source code of a lambda function."""
def __init__(self) -> None:
"""Initialize the visitor."""
self.source: str | None = None
self.count = 0
@override
def visit_Lambda(self, node: ast.Lambda) -> None:
"""Visit a lambda function.
Args:
node: The node to visit.
"""
self.count += 1
if hasattr(ast, "unparse"):
self.source = ast.unparse(node)
def get_function_first_arg_dict_keys(func: Callable) -> list[str] | None:
"""Get the keys of the first argument of a function if it is a dict.
Args:
func: The function to check.
Returns:
The keys of the first argument if it is a dict, None otherwise.
"""
try:
code = inspect.getsource(func)
tree = ast.parse(textwrap.dedent(code))
visitor = IsFunctionArgDict()
visitor.visit(tree)
return sorted(visitor.keys) if visitor.keys else None
except (SyntaxError, TypeError, OSError, SystemError):
return None
def get_lambda_source(func: Callable) -> str | None:
"""Get the source code of a lambda function.
Args:
func: a Callable that can be a lambda function.
Returns:
the source code of the lambda function.
"""
try:
name = func.__name__ if func.__name__ != "<lambda>" else None
except AttributeError:
name = None
try:
code = inspect.getsource(func)
tree = ast.parse(textwrap.dedent(code))
visitor = GetLambdaSource()
visitor.visit(tree)
except (SyntaxError, TypeError, OSError, SystemError):
return name
return visitor.source if visitor.count == 1 else name
@lru_cache(maxsize=256)
def get_function_nonlocals(func: Callable) -> list[Any]:
"""Get the nonlocal variables accessed by a function.
Args:
func: The function to check.
Returns:
The nonlocal variables accessed by the function.
"""
try:
code = inspect.getsource(func)
tree = ast.parse(textwrap.dedent(code))
visitor = FunctionNonLocals()
visitor.visit(tree)
values: list[Any] = []
closure = (
inspect.getclosurevars(func.__wrapped__)
if hasattr(func, "__wrapped__") and callable(func.__wrapped__)
else inspect.getclosurevars(func)
)
candidates = {**closure.globals, **closure.nonlocals}
for k, v in candidates.items():
if k in visitor.nonlocals:
values.append(v)
for kk in visitor.nonlocals:
if "." in kk and kk.startswith(k):
vv = v
for part in kk.split(".")[1:]:
if vv is None:
break
try:
vv = getattr(vv, part)
except AttributeError:
break
else:
values.append(vv)
except (SyntaxError, TypeError, OSError, SystemError):
return []
return values
def indent_lines_after_first(text: str, prefix: str) -> str:
"""Indent all lines of text after the first line.
Args:
text: The text to indent.
prefix: Used to determine the number of spaces to indent.
Returns:
The indented text.
"""
n_spaces = len(prefix)
spaces = " " * n_spaces
lines = text.splitlines()
return "\n".join([lines[0]] + [spaces + line for line in lines[1:]])
|
GetLambdaSource
|
python
|
getsentry__sentry
|
src/sentry/attachments/redis.py
|
{
"start": 178,
"end": 635
}
|
class ____(BaseAttachmentCache):
def __init__(self, **options):
cluster_id = options.pop("cluster_id", None)
if cluster_id is None:
cluster_id = getattr(settings, "SENTRY_ATTACHMENTS_REDIS_CLUSTER", "rc-short")
BaseAttachmentCache.__init__(self, inner=RedisClusterCache(cluster_id, **options))
# Confusing legacy name for RediscClusterCache
RedisAttachmentCache = RedisClusterAttachmentCache
|
RedisClusterAttachmentCache
|
python
|
pytorch__pytorch
|
test/distributed/checkpoint/e2e/test_e2e_save_and_load.py
|
{
"start": 2534,
"end": 2875
}
|
class ____:
def __init__(self) -> None:
self.data = torch.rand(10, 10, device=device_type)
def state_dict(self):
return {"data": self.data}
def load_state_dict(self, state_dict):
self.data = state_dict["data"]
def __eq__(self, other):
return torch.equal(self.data, other.data)
|
TestStatefulObj
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/core/test_scalarmath.py
|
{
"start": 13753,
"end": 16734
}
|
class ____(TestCase):
@skip(reason="With pytorch, 1/(0+0j) is nan + nan*j, not inf + nan*j")
def test_zero_division(self):
for t in [np.complex64, np.complex128]:
a = t(0.0)
b = t(1.0)
assert_(np.isinf(b / a))
b = t(complex(np.inf, np.inf))
assert_(np.isinf(b / a))
b = t(complex(np.inf, np.nan))
assert_(np.isinf(b / a))
b = t(complex(np.nan, np.inf))
assert_(np.isinf(b / a))
b = t(complex(np.nan, np.nan))
assert_(np.isnan(b / a))
b = t(0.0)
assert_(np.isnan(b / a))
def test_signed_zeros(self):
for t in [np.complex64, np.complex128]:
# tupled (numerator, denominator, expected)
# for testing as expected == numerator/denominator
data = (
((0.0, -1.0), (0.0, 1.0), (-1.0, -0.0)),
((0.0, -1.0), (0.0, -1.0), (1.0, -0.0)),
((0.0, -1.0), (-0.0, -1.0), (1.0, 0.0)),
((0.0, -1.0), (-0.0, 1.0), (-1.0, 0.0)),
((0.0, 1.0), (0.0, -1.0), (-1.0, 0.0)),
((0.0, -1.0), (0.0, -1.0), (1.0, -0.0)),
((-0.0, -1.0), (0.0, -1.0), (1.0, -0.0)),
((-0.0, 1.0), (0.0, -1.0), (-1.0, -0.0)),
)
for cases in data:
n = cases[0]
d = cases[1]
ex = cases[2]
result = t(complex(n[0], n[1])) / t(complex(d[0], d[1]))
# check real and imag parts separately to avoid comparison
# in array context, which does not account for signed zeros
assert_equal(result.real, ex[0])
assert_equal(result.imag, ex[1])
def test_branches(self):
for t in [np.complex64, np.complex128]:
# tupled (numerator, denominator, expected)
# for testing as expected == numerator/denominator
data = []
# trigger branch: real(fabs(denom)) > imag(fabs(denom))
# followed by else condition as neither are == 0
data.append(((2.0, 1.0), (2.0, 1.0), (1.0, 0.0)))
# trigger branch: real(fabs(denom)) > imag(fabs(denom))
# followed by if condition as both are == 0
# is performed in test_zero_division(), so this is skipped
# trigger else if branch: real(fabs(denom)) < imag(fabs(denom))
data.append(((1.0, 2.0), (1.0, 2.0), (1.0, 0.0)))
for cases in data:
n = cases[0]
d = cases[1]
ex = cases[2]
result = t(complex(n[0], n[1])) / t(complex(d[0], d[1]))
# check real and imag parts separately to avoid comparison
# in array context, which does not account for signed zeros
assert_equal(result.real, ex[0])
assert_equal(result.imag, ex[1])
|
TestComplexDivision
|
python
|
numpy__numpy
|
tools/swig/test/testMatrix.py
|
{
"start": 12312,
"end": 12585
}
|
class ____(MatrixTestCase):
def __init__(self, methodName="runTest"):
MatrixTestCase.__init__(self, methodName)
self.typeStr = "ulongLong"
self.typeCode = "Q"
######################################################################
|
ulongLongTestCase
|
python
|
scipy__scipy
|
scipy/stats/_continuous_distns.py
|
{
"start": 80159,
"end": 85960
}
|
class ____(rv_continuous):
r"""Weibull minimum continuous random variable.
The Weibull Minimum Extreme Value distribution, from extreme value theory
(Fisher-Gnedenko theorem), is also often simply called the Weibull
distribution. It arises as the limiting distribution of the rescaled
minimum of iid random variables.
%(before_notes)s
See Also
--------
weibull_max, numpy.random.Generator.weibull, exponweib
Notes
-----
The probability density function for `weibull_min` is:
.. math::
f(x, c) = c x^{c-1} \exp(-x^c)
for :math:`x > 0`, :math:`c > 0`.
`weibull_min` takes ``c`` as a shape parameter for :math:`c`.
(named :math:`k` in Wikipedia article and :math:`a` in
``numpy.random.weibull``). Special shape values are :math:`c=1` and
:math:`c=2` where Weibull distribution reduces to the `expon` and
`rayleigh` distributions respectively.
Suppose ``X`` is an exponentially distributed random variable with
scale ``s``. Then ``Y = X**k`` is `weibull_min` distributed with shape
``c = 1/k`` and scale ``s**k``.
%(after_notes)s
References
----------
https://en.wikipedia.org/wiki/Weibull_distribution
https://en.wikipedia.org/wiki/Fisher-Tippett-Gnedenko_theorem
%(example)s
"""
def _shape_info(self):
return [_ShapeInfo("c", False, (0, np.inf), (False, False))]
def _pdf(self, x, c):
# weibull_min.pdf(x, c) = c * x**(c-1) * exp(-x**c)
return c*pow(x, c-1)*np.exp(-pow(x, c))
def _logpdf(self, x, c):
return np.log(c) + sc.xlogy(c - 1, x) - pow(x, c)
def _cdf(self, x, c):
return -sc.expm1(-pow(x, c))
def _ppf(self, q, c):
return pow(-sc.log1p(-q), 1.0/c)
def _sf(self, x, c):
return np.exp(self._logsf(x, c))
def _logsf(self, x, c):
return -pow(x, c)
def _isf(self, q, c):
return (-np.log(q))**(1/c)
def _munp(self, n, c):
return sc.gamma(1.0+n*1.0/c)
def _entropy(self, c):
return -_EULER / c - np.log(c) + _EULER + 1
@extend_notes_in_docstring(rv_continuous, notes="""\
If ``method='mm'``, parameters fixed by the user are respected, and the
remaining parameters are used to match distribution and sample moments
where possible. For example, if the user fixes the location with
``floc``, the parameters will only match the distribution skewness and
variance to the sample skewness and variance; no attempt will be made
to match the means or minimize a norm of the errors.
\n\n""")
def fit(self, data, *args, **kwds):
if isinstance(data, CensoredData):
if data.num_censored() == 0:
data = data._uncensor()
else:
return super().fit(data, *args, **kwds)
if kwds.pop('superfit', False):
return super().fit(data, *args, **kwds)
# this extracts fixed shape, location, and scale however they
# are specified, and also leaves them in `kwds`
data, fc, floc, fscale = _check_fit_input_parameters(self, data,
args, kwds)
method = kwds.get("method", "mle").lower()
# See https://en.wikipedia.org/wiki/Weibull_distribution#Moments for
# moment formulas.
def skew(c):
gamma1 = sc.gamma(1+1/c)
gamma2 = sc.gamma(1+2/c)
gamma3 = sc.gamma(1+3/c)
num = 2 * gamma1**3 - 3*gamma1*gamma2 + gamma3
den = (gamma2 - gamma1**2)**(3/2)
return num/den
# For c in [1e2, 3e4], population skewness appears to approach
# asymptote near -1.139, but past c > 3e4, skewness begins to vary
# wildly, and MoM won't provide a good guess. Get out early.
s = stats.skew(data)
max_c = 1e4
s_min = skew(max_c)
if s < s_min and method != "mm" and fc is None and not args:
return super().fit(data, *args, **kwds)
# If method is method of moments, we don't need the user's guesses.
# Otherwise, extract the guesses from args and kwds.
if method == "mm":
c, loc, scale = None, None, None
else:
c = args[0] if len(args) else None
loc = kwds.pop('loc', None)
scale = kwds.pop('scale', None)
if fc is None and c is None: # not fixed and no guess: use MoM
# Solve for c that matches sample distribution skewness to sample
# skewness.
# we start having numerical issues with `weibull_min` with
# parameters outside this range - and not just in this method.
# We could probably improve the situation by doing everything
# in the log space, but that is for another time.
c = root_scalar(lambda c: skew(c) - s, bracket=[0.02, max_c],
method='bisect').root
elif fc is not None: # fixed: use it
c = fc
if fscale is None and scale is None:
v = np.var(data)
scale = np.sqrt(v / (sc.gamma(1+2/c) - sc.gamma(1+1/c)**2))
elif fscale is not None:
scale = fscale
if floc is None and loc is None:
m = np.mean(data)
loc = m - scale*sc.gamma(1 + 1/c)
elif floc is not None:
loc = floc
if method == 'mm':
return c, loc, scale
else:
# At this point, parameter "guesses" may equal the fixed parameters
# in kwds. No harm in passing them as guesses, too.
return super().fit(data, c, loc=loc, scale=scale, **kwds)
weibull_min = weibull_min_gen(a=0.0, name='weibull_min')
|
weibull_min_gen
|
python
|
pytorch__pytorch
|
test/distributed/fsdp/test_fsdp_use_orig_params.py
|
{
"start": 22168,
"end": 28508
}
|
class ____(FSDPTest):
"""Tests the unshard/reshard flow."""
@property
def world_size(self) -> int:
return 2
def _get_fsdp_models_and_optims(
self,
sharding_strategy: ShardingStrategy,
cpu_offload: CPUOffload,
) -> tuple[FSDP, torch.optim.Optimizer, FSDP, torch.optim.Optimizer]:
"""
Returns a pair of (FSDP model, optimizer) for ``use_orig_params=False``
and ``True``, respectively.
"""
LR = 1e-2
fsdp_kwargs = {
"sharding_strategy": sharding_strategy,
"cpu_offload": cpu_offload,
"use_orig_params": False,
}
fsdp_model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
DEVICEInitMode.DEVICE_BEFORE,
fsdp_kwargs=fsdp_kwargs,
deterministic=True,
)
optim = torch.optim.Adam(fsdp_model.parameters(), foreach=False, lr=LR)
fsdp_kwargs["use_orig_params"] = True
fsdp_model_orig_params = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.RECURSIVE,
DEVICEInitMode.DEVICE_BEFORE,
fsdp_kwargs=fsdp_kwargs,
deterministic=True,
)
optim_orig_params = torch.optim.Adam(
fsdp_model_orig_params.parameters(), foreach=False, lr=LR
)
return fsdp_model, optim, fsdp_model_orig_params, optim_orig_params
def _check_fsdp_parameter_parity(self, fsdp1: FSDP, fsdp2: FSDP) -> None:
"""Checks that two FSDP instances have the same model parameters."""
with FSDP.summon_full_params(fsdp1), FSDP.summon_full_params(fsdp2):
for (n1, p1), (n2, p2) in zip(
fsdp1.named_parameters(),
fsdp2.named_parameters(),
):
self.assertEqual(n1, n2)
torch.testing.assert_close(p1, p2)
def _get_fsdp_parity_subtest_config(self):
return {
"sharding_strategy": [
ShardingStrategy.NO_SHARD,
ShardingStrategy.SHARD_GRAD_OP,
ShardingStrategy.FULL_SHARD,
],
}
@skip_if_lt_x_gpu(2)
@parametrize("offload_params", [False, True])
def test_multiple_forward(self, offload_params: bool):
"""
Tests that ``use_orig_params=True`` has parity with ``False`` when
running multiple forward passes before a backward pass.
"""
cpu_offload = CPUOffload(offload_params=offload_params)
self.run_subtests(
self._get_fsdp_parity_subtest_config(),
self._test_multiple_forward,
cpu_offload=cpu_offload,
)
@skip_if_lt_x_gpu(2)
def _test_multiple_forward(
self,
sharding_strategy: ShardingStrategy,
cpu_offload: CPUOffload,
):
(
fsdp_model,
optim,
fsdp_model_orig_params,
optim_orig_params,
) = self._get_fsdp_models_and_optims(sharding_strategy, cpu_offload)
device = torch.device(device_type)
for _ in range(3):
inp1 = fsdp_model.get_input(device)
_inp2 = fsdp_model.get_input(device)
inp2 = tuple(
t + torch.ones_like(t) for t in _inp2
) # make different from `inp1`
# For these loss lists: elem 0 is baseline; elem 1 is test
losses1 = []
losses2 = []
losses = []
for _model, _optim in (
(fsdp_model, optim),
(
fsdp_model_orig_params,
optim_orig_params,
),
):
_optim.zero_grad()
loss1 = _model(*inp1)
losses1.append(loss1)
loss2 = _model(*inp2)
losses2.append(loss2)
loss = (loss1 + loss2).sum()
losses.append(loss)
_model.run_backward(loss)
_optim.step()
self.assertEqual(losses1[0], losses1[1])
self.assertEqual(losses2[0], losses2[1])
self.assertEqual(losses[0], losses[1])
self._check_fsdp_parameter_parity(fsdp_model, fsdp_model_orig_params)
@skip_if_lt_x_gpu(2)
@parametrize("offload_params", [False, True])
def test_summon_between_two_forwards(self, offload_params: bool):
"""
Tests that ``use_orig_params=True`` has parity with ``False`` when
running a forward pass, :meth:`summon_full_params()`, and another
forward pass before a backward pass.
"""
cpu_offload = CPUOffload(offload_params=offload_params)
self.run_subtests(
self._get_fsdp_parity_subtest_config(),
self._test_summon_between_two_forwards,
cpu_offload=cpu_offload,
)
def _test_summon_between_two_forwards(
self,
sharding_strategy: ShardingStrategy,
cpu_offload: CPUOffload,
):
(
fsdp_model,
optim,
fsdp_model_orig_params,
optim_orig_params,
) = self._get_fsdp_models_and_optims(sharding_strategy, cpu_offload)
device = torch.device(device_type)
for _ in range(3):
optim.zero_grad()
optim_orig_params.zero_grad()
inp1 = fsdp_model.get_input(device)
loss1 = fsdp_model(*inp1)
loss_orig_params1 = fsdp_model_orig_params(*inp1)
self.assertEqual(loss1, loss_orig_params1)
# Calls into `summon_full_params()`
self._check_fsdp_parameter_parity(fsdp_model, fsdp_model_orig_params)
inp2 = fsdp_model.get_input(device)
loss2 = fsdp_model(*inp2)
loss_orig_params2 = fsdp_model_orig_params(*inp2)
self.assertEqual(loss2, loss_orig_params2)
loss = (loss1 + loss2).sum()
loss_orig_params = (loss_orig_params1 + loss_orig_params2).sum()
fsdp_model.run_backward(loss)
fsdp_model_orig_params.run_backward(loss_orig_params)
optim.step()
optim_orig_params.step()
self._check_fsdp_parameter_parity(fsdp_model, fsdp_model_orig_params)
|
TestFSDPUseOrigParamsUnshardReshard
|
python
|
getsentry__sentry
|
src/sentry/plugins/bases/notify.py
|
{
"start": 638,
"end": 698
}
|
class ____(forms.Form):
pass
|
NotificationConfigurationForm
|
python
|
pypa__pip
|
src/pip/_vendor/pygments/lexer.py
|
{
"start": 27965,
"end": 28364
}
|
class ____:
"""
A helper object that holds lexer position data.
"""
def __init__(self, text, pos, stack=None, end=None):
self.text = text
self.pos = pos
self.end = end or len(text) # end=0 not supported ;-)
self.stack = stack or ['root']
def __repr__(self):
return f'LexerContext({self.text!r}, {self.pos!r}, {self.stack!r})'
|
LexerContext
|
python
|
run-llama__llama_index
|
llama-index-integrations/storage/chat_store/llama-index-storage-chat-store-dynamodb/llama_index/storage/chat_store/dynamodb/base.py
|
{
"start": 797,
"end": 15256
}
|
class ____(BaseChatStore):
"""
DynamoDB Chat Store.
Args:
table_name (str): The name of the preexisting DynamoDB table.
primary_key (str, optional): The primary/partition key to use for the table.
Defaults to "SessionId".
profile_name (str, optional): The AWS profile to use. If not specified, then
the default AWS profile is used.
aws_access_key_id (str, optional): The AWS Access Key ID to use.
aws_secret_access_key (str, optional): The AWS Secret Access Key to use.
aws_session_token (str, optional): The AWS Session Token to use.
botocore_session (Any, optional): Use this Botocore session instead of creating a new default one.
botocore_config (Any, optional): Custom configuration object to use instead of the default generated one.
region_name (str, optional): The AWS region name to use. Uses the region configured in AWS CLI if not passed.
max_retries (int, optional): The maximum number of API retries. Defaults to 10.
timeout (float, optional): The timeout for API requests in seconds. Defaults to 60.0.
session_kwargs (Dict[str, Any], optional): Additional kwargs for the `boto3.Session` object.
resource_kwargs (Dict[str, Any], optional): Additional kwargs for the `boto3.Resource` object.
ttl_seconds (Optional[int], optional): Time-to-live in seconds for items in the table.
If set, items will expire after this many seconds. Defaults to None (no expiration).
ttl_attribute (str, optional): The name of the attribute to use for TTL.
Defaults to "TTL".
Returns:
DynamoDBChatStore: A DynamoDB chat store object.
"""
table_name: str = Field(description="DynamoDB table")
primary_key: str = Field(
default="SessionId", description="Primary/partition key to use for the table."
)
profile_name: Optional[str] = Field(
description="AWS profile to use. If not specified, then the default AWS profile is used."
)
aws_access_key_id: Optional[str] = Field(
description="AWS Access Key ID to use.", exclude=True
)
aws_secret_access_key: Optional[str] = Field(
description="AWS Secret Access Key to use.", exclude=True
)
aws_session_token: Optional[str] = Field(
description="AWS Session Token to use.", exclude=True
)
botocore_session: Optional[Any] = Field(
description="Use this Botocore session instead of creating a new default one.",
exclude=True,
)
botocore_config: Optional[Any] = Field(
description="Custom configuration object to use instead of the default generated one.",
exclude=True,
)
region_name: Optional[str] = Field(
description="AWS region name to use. Uses the region configured in AWS CLI if not passed",
exclude=True,
)
max_retries: int = Field(
default=10, description="The maximum number of API retries.", gt=0
)
timeout: float = Field(
default=60.0,
description="The timeout for API requests in seconds.",
)
session_kwargs: Dict[str, Any] = Field(
default_factory=dict,
description="Additional kwargs for the `boto3.Session` object.",
)
resource_kwargs: Dict[str, Any] = Field(
default_factory=dict,
description="Additional kwargs for the `boto3.Resource` object.",
)
ttl_seconds: Optional[int] = Field(
default=None,
description="Time-to-live in seconds for items in the table. If set, items will expire after this many seconds.",
)
ttl_attribute: str = Field(
default="TTL",
description="The name of the attribute to use for TTL.",
)
_client: ServiceResource = PrivateAttr()
_table: Any = PrivateAttr()
_aclient: ServiceResource = PrivateAttr()
_atable: Any = PrivateAttr()
def __init__(
self,
table_name: str,
primary_key: str = "SessionId",
profile_name: Optional[str] = None,
region_name: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
botocore_session: Optional[Any] = None,
botocore_config: Optional[Any] = None,
max_retries: int = 10,
timeout: float = 60.0,
session_kwargs: Optional[Dict[str, Any]] = None,
resource_kwargs: Optional[Dict[str, Any]] = None,
ttl_seconds: Optional[int] = None,
ttl_attribute: str = "TTL",
):
session_kwargs = session_kwargs or {}
resource_kwargs = resource_kwargs or {}
super().__init__(
table_name=table_name,
primary_key=primary_key,
profile_name=profile_name,
region_name=region_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
botocore_session=botocore_session,
botocore_config=botocore_config,
max_retries=max_retries,
timeout=timeout,
session_kwargs=session_kwargs,
resource_kwargs=resource_kwargs,
ttl_seconds=ttl_seconds,
ttl_attribute=ttl_attribute,
)
session_kwargs = {
"profile_name": profile_name,
"region_name": region_name,
"aws_access_key_id": aws_access_key_id,
"aws_secret_access_key": aws_secret_access_key,
"aws_session_token": aws_session_token,
"botocore_session": botocore_session,
**session_kwargs,
}
try:
import boto3
from botocore.config import Config
config = (
Config(
retries={"max_attempts": max_retries, "mode": "standard"},
connect_timeout=timeout,
read_timeout=timeout,
)
if botocore_config is None
else botocore_config
)
session = boto3.Session(**session_kwargs)
except ImportError:
raise ImportError(
"boto3 package not found, install with 'pip install boto3"
)
self._client = session.resource("dynamodb", config=config, **resource_kwargs)
self._table = self._client.Table(table_name)
async def init_async_table(self):
"""Initialize asynchronous table."""
if self._atable is None:
try:
import aioboto3
async_session = aioboto3.Session(**self.session_kwargs)
except ImportError:
raise ImportError(
"aioboto3 package not found, install with 'pip install aioboto3'"
)
async with async_session.resource(
"dynamodb", config=self.botocore_config, **self.resource_kwargs
) as dynamodb:
self._atable = await dynamodb.Table(self.table_name)
@classmethod
def class_name(self) -> str:
return "DynamoDBChatStore"
def set_messages(self, key: str, messages: List[ChatMessage]) -> None:
"""
Assign all provided messages to the row with the given key.
Any pre-existing messages for that key will be overwritten.
Args:
key (str): The key specifying a row.
messages (List[ChatMessage]): The messages to assign to the key.
Returns:
None
"""
item = {self.primary_key: key, "History": _messages_to_dict(messages)}
# Add TTL if configured
if self.ttl_seconds is not None:
item[self.ttl_attribute] = int(time.time()) + self.ttl_seconds
self._table.put_item(Item=item)
async def aset_messages(self, key: str, messages: List[ChatMessage]) -> None:
self.init_async_table()
item = {self.primary_key: key, "History": _messages_to_dict(messages)}
# Add TTL if configured
if self.ttl_seconds is not None:
item[self.ttl_attribute] = int(time.time()) + self.ttl_seconds
await self._atable.put_item(Item=item)
def get_messages(self, key: str) -> List[ChatMessage]:
"""
Retrieve all messages for the given key.
Args:
key (str): The key specifying a row.
Returns:
List[ChatMessage]: The messages associated with the key.
"""
response = self._table.get_item(Key={self.primary_key: key})
if response and "Item" in response:
message_history = response["Item"]["History"]
else:
message_history = []
return [_dict_to_message(message) for message in message_history]
async def aget_messages(self, key: str) -> List[ChatMessage]:
self.init_async_table()
response = await self._atable.get_item(Key={self.primary_key: key})
if response and "Item" in response:
message_history = response["Item"]["History"]
else:
message_history = []
return [_dict_to_message(message) for message in message_history]
def add_message(self, key: str, message: ChatMessage) -> None:
"""
Add a message to the end of the chat history for the given key.
Creates a new row if the key does not exist.
Args:
key (str): The key specifying a row.
message (ChatMessage): The message to add to the chat history.
Returns:
None
"""
current_messages = _messages_to_dict(self.get_messages(key))
current_messages.append(_message_to_dict(message))
item = {self.primary_key: key, "History": current_messages}
# Add TTL if configured
if self.ttl_seconds is not None:
item[self.ttl_attribute] = int(time.time()) + self.ttl_seconds
self._table.put_item(Item=item)
async def async_add_message(self, key: str, message: ChatMessage) -> None:
self.init_async_table()
current_messages = _messages_to_dict(await self.aget_messages(key))
current_messages.append(_message_to_dict(message))
item = {self.primary_key: key, "History": current_messages}
# Add TTL if configured
if self.ttl_seconds is not None:
item[self.ttl_attribute] = int(time.time()) + self.ttl_seconds
await self._atable.put_item(Item=item)
def delete_messages(self, key: str) -> Optional[List[ChatMessage]]:
"""
Deletes the entire chat history for the given key (i.e. the row).
Args:
key (str): The key specifying a row.
Returns:
Optional[List[ChatMessage]]: The messages that were deleted. None if the
deletion failed.
"""
messages_to_delete = self.get_messages(key)
self._table.delete_item(Key={self.primary_key: key})
return messages_to_delete
async def adelete_messages(self, key: str) -> Optional[List[ChatMessage]]:
self.init_async_table()
messages_to_delete = await self.aget_messages(key)
await self._atable.delete_item(Key={self.primary_key: key})
return messages_to_delete
def delete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
"""
Deletes the message at the given index for the given key.
Args:
key (str): The key specifying a row.
idx (int): The index of the message to delete.
Returns:
Optional[ChatMessage]: The message that was deleted. None if the index
did not exist.
"""
current_messages = self.get_messages(key)
try:
message_to_delete = current_messages[idx]
del current_messages[idx]
self.set_messages(key, current_messages)
return message_to_delete
except IndexError:
logger.error(
IndexError(f"No message exists at index, {idx}, for key {key}")
)
return None
async def adelete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
self.init_async_table()
current_messages = await self.aget_messages(key)
try:
message_to_delete = current_messages[idx]
del current_messages[idx]
await self.aset_messages(key, current_messages)
return message_to_delete
except IndexError:
logger.error(
IndexError(f"No message exists at index, {idx}, for key {key}")
)
return None
def delete_last_message(self, key: str) -> Optional[ChatMessage]:
"""
Deletes the last message in the chat history for the given key.
Args:
key (str): The key specifying a row.
Returns:
Optional[ChatMessage]: The message that was deleted. None if the chat history
was empty.
"""
return self.delete_message(key, -1)
async def adelete_last_message(self, key: str) -> Optional[ChatMessage]:
return self.adelete_message(key, -1)
def get_keys(self) -> List[str]:
"""
Retrieve all keys in the table.
Returns:
List[str]: The keys in the table.
"""
response = self._table.scan(ProjectionExpression=self.primary_key)
keys = [item[self.primary_key] for item in response["Items"]]
while "LastEvaluatedKey" in response:
response = self._table.scan(
ProjectionExpression=self.primary_key,
ExclusiveStartKey=response["LastEvaluatedKey"],
)
keys.extend([item[self.primary_key] for item in response["Items"]])
return keys
async def aget_keys(self) -> List[str]:
self.init_async_table()
response = await self._atable.scan(ProjectionExpression=self.primary_key)
keys = [item[self.primary_key] for item in response["Items"]]
while "LastEvaluatedKey" in response:
response = await self._atable.scan(
ProjectionExpression=self.primary_key,
ExclusiveStartKey=response["LastEvaluatedKey"],
)
keys.extend([item[self.primary_key] for item in response["Items"]])
return keys
|
DynamoDBChatStore
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/runtime/context_util_test.py
|
{
"start": 834,
"end": 5003
}
|
class ____(unittest.TestCase):
@parameterized.expand(
[
# Test case: URL with no page path
("https://example.com", {}, "https://example.com"),
# Test case: URL with page path that matches a page
(
"https://example.com/page1",
{"hash1": {"url_pathname": "page1"}},
"https://example.com/",
),
# Test case: URL with page path that doesn't match any page
(
"https://example.com/unknown",
{"hash1": {"url_pathname": "page1"}},
"https://example.com/unknown",
),
# Test case: URL with trailing slash
(
"https://example.com/page1/",
{"hash1": {"url_pathname": "page1"}},
"https://example.com/",
),
# Test case: URL with multiple segments where the last segment matches a page
(
"https://example.com/path/to/page1",
{"hash1": {"url_pathname": "page1"}},
"https://example.com/path/to/",
),
# Test case: URL with empty page path in pages
(
"https://example.com",
{"hash1": {"url_pathname": ""}},
"https://example.com",
),
# Test case: URL with multiple pages, one matching
(
"https://example.com/page2",
{
"hash1": {"url_pathname": "page1"},
"hash2": {"url_pathname": "page2"},
"hash3": {"url_pathname": "page3"},
},
"https://example.com/",
),
]
)
def test_maybe_trim_page_path(self, url, pages, expected):
"""Test that `maybe_trim_page_path` correctly trims page paths from URLs"""
# Create a mock PagesManager
mock_page_manager = MagicMock()
mock_page_manager.get_pages.return_value = pages
# Call the function and check the result
result = maybe_trim_page_path(url, mock_page_manager)
assert result == expected
@parameterized.expand(
[
# Test case: URL with no current page
("https://example.com", "", {}, "https://example.com"),
# Test case: URL with the current page that has a url_pathname
(
"https://example.com",
"hash1",
{"hash1": {"url_pathname": "page1"}},
"https://example.com/page1",
),
# Test case: URL with current page that has no url_pathname
(
"https://example.com",
"hash1",
{"hash1": {"page_name": "Page 1"}},
"https://example.com",
),
# Test case: URL with current page that has empty url_pathname
(
"https://example.com",
"hash1",
{"hash1": {"url_pathname": ""}},
"https://example.com",
),
# Test case: URL with trailing slash
(
"https://example.com/",
"hash1",
{"hash1": {"url_pathname": "page1"}},
"https://example.com/page1",
),
# Test case: URL with the current page hash that doesn't exist in pages
(
"https://example.com",
"unknown",
{"hash1": {"url_pathname": "page1"}},
"https://example.com",
),
]
)
def test_maybe_add_page_path(self, url, current_hash, pages, expected):
"""Test that `maybe_add_page_path` correctly adds page paths to URLs"""
# Create a mock PagesManager
mock_page_manager = MagicMock()
mock_page_manager.current_page_script_hash = current_hash
mock_page_manager.get_pages.return_value = pages
# Call the function and check the result
result = maybe_add_page_path(url, mock_page_manager)
assert result == expected
|
ContextUtilTest
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/components/lib/shim_components/asset.py
|
{
"start": 224,
"end": 511
}
|
class ____(ShimScaffolder):
def get_text(self, request: ScaffoldRequest) -> str:
return f"""import dagster as dg
@dg.asset
def {request.target_path.stem}(context: dg.AssetExecutionContext) -> dg.MaterializeResult: ...
"""
scaffold_with(AssetScaffolder)(asset)
|
AssetScaffolder
|
python
|
jazzband__django-oauth-toolkit
|
tests/app/idp/idp/oauth.py
|
{
"start": 404,
"end": 1703
}
|
class CustomOAuth2Validator(OAuth2Validator):
    def validate_silent_login(self, request) -> bool:
        """Return True if the user behind *request* has an authenticated session.

        ``request`` is an oauthlib.common.Request and doesn't have the session
        or user of the django request. We emulate the session and auth
        middleware here, since that is what the idp is using for auth. You
        may need to modify this if you are using a different session
        middleware or auth backend.
        """
        session_cookie_name = settings.SESSION_COOKIE_NAME
        HTTP_COOKIE = request.headers.get("HTTP_COOKIE")
        # Find the session cookie explicitly.  The previous implementation
        # broke on cookie values containing "=" (split() returned >2 parts)
        # and, when the session cookie was absent, silently used the value of
        # the *last* cookie in the header instead.
        session_key = None
        for cookie in HTTP_COOKIE.split("; "):
            # Split on the first "=" only; cookie values may contain "=".
            cookie_name, _, cookie_value = cookie.partition("=")
            if cookie_name == session_cookie_name:
                session_key = cookie_value
                break
        session_middleware = SessionMiddleware(get_response)
        # SessionStore(None) yields an empty session, so a missing session
        # cookie results in an anonymous (unauthenticated) user below.
        session = session_middleware.SessionStore(session_key)
        # add session to request for compatibility with django.contrib.auth
        request.session = session
        # call the auth middleware to set request.user
        auth_middleware = AuthenticationMiddleware(get_response)
        auth_middleware.process_request(request)
        return request.user.is_authenticated

    def validate_silent_authorization(self, request) -> bool:
        """Silent authorization is always granted once silent login succeeds."""
        return True
|
CustomOAuth2Validator
|
python
|
sympy__sympy
|
sympy/combinatorics/partitions.py
|
{
"start": 8806,
"end": 20822
}
|
class IntegerPartition(Basic):
    """
    This class represents an integer partition.

    Explanation
    ===========

    In number theory and combinatorics, a partition of a positive integer,
    ``n``, also called an integer partition, is a way of writing ``n`` as a
    list of positive integers that sum to n. Two partitions that differ only
    in the order of summands are considered to be the same partition; if order
    matters then the partitions are referred to as compositions. For example,
    4 has five partitions: [4], [3, 1], [2, 2], [2, 1, 1], and [1, 1, 1, 1];
    the compositions [1, 2, 1] and [1, 1, 2] are the same as partition
    [2, 1, 1].

    See Also
    ========

    sympy.utilities.iterables.partitions,
    sympy.utilities.iterables.multiset_partitions

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Partition_%28number_theory%29
    """

    # Lazily-built cache of the {summand: multiplicity} mapping; see as_dict().
    _dict = None
    # Lazily-built cache of the distinct summands, largest first; see as_dict().
    _keys = None

    def __new__(cls, partition, integer=None):
        """
        Generates a new IntegerPartition object from a list or dictionary.

        Explanation
        ===========

        The partition can be given as a list of positive integers or a
        dictionary of (integer, multiplicity) items. If the partition is
        preceded by an integer an error will be raised if the partition
        does not sum to that given integer.

        Examples
        ========

        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([5, 4, 3, 1, 1])
        >>> a
        IntegerPartition(14, (5, 4, 3, 1, 1))
        >>> print(a)
        [5, 4, 3, 1, 1]
        >>> IntegerPartition({1:3, 2:1})
        IntegerPartition(5, (2, 1, 1, 1))

        If the value that the partition should sum to is given first, a check
        will be made and an error will be raised if there is a discrepancy:

        >>> IntegerPartition(10, [5, 4, 3, 1])
        Traceback (most recent call last):
        ...
        ValueError: Partition did not add to 10
        """
        if integer is not None:
            # Support the IntegerPartition(n, partition) calling convention by
            # swapping the arguments into their natural roles.
            integer, partition = partition, integer
        if isinstance(partition, (dict, Dict)):
            # Expand {summand: multiplicity} into an explicit descending tuple,
            # skipping zero multiplicities.
            _ = []
            for k, v in sorted(partition.items(), reverse=True):
                if not v:
                    continue
                k, v = as_int(k), as_int(v)
                _.extend([k]*v)
            partition = tuple(_)
        else:
            partition = tuple(sorted(map(as_int, partition), reverse=True))
        sum_ok = False
        if integer is None:
            integer = sum(partition)
            sum_ok = True  # the sum is correct by construction
        else:
            integer = as_int(integer)
        if not sum_ok and sum(partition) != integer:
            raise ValueError("Partition did not add to %s" % integer)
        if any(i < 1 for i in partition):
            # Message fixed: the check rejects summands < 1, i.e. non-positive
            # ones, not summands <= 1 as the old "greater than one" text said.
            raise ValueError("All integer summands must be greater than zero")

        obj = Basic.__new__(cls, Integer(integer), Tuple(*partition))
        obj.partition = list(partition)
        obj.integer = integer
        return obj

    def prev_lex(self):
        """Return the previous partition of the integer, n, in lexical order,
        wrapping around to [1, ..., 1] if the partition is [n].

        Examples
        ========

        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> p = IntegerPartition([4])
        >>> print(p.prev_lex())
        [3, 1]
        >>> p.partition > p.prev_lex().partition
        True
        """
        d = defaultdict(int)
        d.update(self.as_dict())
        keys = self._keys
        if keys == [1]:
            # [1, 1, ..., 1] wraps around to [n].
            return IntegerPartition({self.integer: 1})
        if keys[-1] != 1:
            # Decrease the smallest summand and account for the freed unit.
            d[keys[-1]] -= 1
            if keys[-1] == 2:
                d[1] = 2
            else:
                d[keys[-1] - 1] = d[1] = 1
        else:
            # Smallest summand is 1: shrink the next-smallest and redistribute
            # the remainder greedily into the largest parts that still fit.
            d[keys[-2]] -= 1
            left = d[1] + keys[-2]
            new = keys[-2]
            d[1] = 0
            while left:
                new -= 1
                if left - new >= 0:
                    d[new] += left//new
                    left -= d[new]*new
        return IntegerPartition(self.integer, d)

    def next_lex(self):
        """Return the next partition of the integer, n, in lexical order,
        wrapping around to [n] if the partition is [1, ..., 1].

        Examples
        ========

        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> p = IntegerPartition([3, 1])
        >>> print(p.next_lex())
        [4]
        >>> p.partition < p.next_lex().partition
        True
        """
        d = defaultdict(int)
        d.update(self.as_dict())
        key = self._keys
        a = key[-1]  # smallest summand
        if a == self.integer:
            # [n] wraps around to [1, ..., 1].
            d.clear()
            d[1] = self.integer
        elif a == 1:
            if d[a] > 1:
                # Merge two 1s into a 2.
                d[a + 1] += 1
                d[a] -= 2
            else:
                # A single 1: bump the next-smallest summand and convert the
                # rest of its copies back into 1s.
                b = key[-2]
                d[b + 1] += 1
                d[1] = (d[b] - 1)*b
                d[b] = 0
        else:
            if d[a] > 1:
                if len(key) == 1:
                    # All summands equal: start a larger summand and pad with 1s.
                    d.clear()
                    d[a + 1] = 1
                    d[1] = self.integer - a - 1
                else:
                    a1 = a + 1
                    d[a1] += 1
                    d[1] = d[a]*a - a1
                    d[a] = 0
            else:
                # One copy of the smallest summand: bump the next-smallest and
                # pay for it with 1s.
                b = key[-2]
                b1 = b + 1
                d[b1] += 1
                need = d[b]*b + d[a]*a - b1
                d[a] = d[b] = 0
                d[1] = need
        return IntegerPartition(self.integer, d)

    def as_dict(self):
        """Return the partition as a dictionary whose keys are the
        partition integers and the values are the multiplicity of that
        integer.

        Examples
        ========

        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> IntegerPartition([1]*3 + [2] + [3]*4).as_dict()
        {1: 3, 2: 1, 3: 4}
        """
        if self._dict is None:
            # Build both caches in one pass over the (sorted) partition.
            groups = group(self.partition, multiple=False)
            self._keys = [g[0] for g in groups]
            self._dict = dict(groups)
        return self._dict

    @property
    def conjugate(self):
        """
        Computes the conjugate partition of itself.

        Examples
        ========

        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([6, 3, 3, 2, 1])
        >>> a.conjugate
        [5, 4, 3, 1, 1, 1]
        """
        # b[k-1] counts how many summands are >= k, i.e. the height of
        # column k of the Ferrers diagram.
        j = 1
        temp_arr = list(self.partition) + [0]
        k = temp_arr[0]
        b = [0]*k
        while k > 0:
            while k > temp_arr[j]:
                b[k - 1] = j
                k -= 1
            j += 1
        return b

    def __lt__(self, other):
        """Return True if self is less than other when the partition
        is listed from smallest to biggest.

        Examples
        ========

        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([3, 1])
        >>> a < a
        False
        >>> b = a.next_lex()
        >>> a < b
        True
        >>> a == b
        False
        """
        return list(reversed(self.partition)) < list(reversed(other.partition))

    def __le__(self, other):
        """Return True if self is less than or equal to other when the
        partition is listed from smallest to biggest.

        Examples
        ========

        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([4])
        >>> a <= a
        True
        """
        return list(reversed(self.partition)) <= list(reversed(other.partition))

    def as_ferrers(self, char='#'):
        """
        Prints the ferrer diagram of a partition.

        Examples
        ========

        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> print(IntegerPartition([1, 1, 5]).as_ferrers())
        #####
        #
        #
        """
        return "\n".join([char*i for i in self.partition])

    def __str__(self):
        return str(list(self.partition))
def random_integer_partition(n, seed=None):
    """
    Generates a random integer partition summing to ``n`` as a list
    of reverse-sorted integers.

    Examples
    ========

    >>> from sympy.combinatorics.partitions import random_integer_partition

    For the following, a seed is given so a known value can be shown; in
    practice, the seed would not be given.

    >>> random_integer_partition(100, seed=[1, 1, 12, 1, 2, 1, 85, 1])
    [85, 12, 2, 1]
    >>> random_integer_partition(10, seed=[1, 2, 3, 1, 5, 1])
    [5, 3, 1, 1]
    >>> random_integer_partition(1)
    [1]
    """
    from sympy.core.random import _randint

    n = as_int(n)
    if n < 1:
        raise ValueError('n must be a positive integer')
    randint = _randint(seed)

    # Repeatedly draw a summand and a multiplicity until n is exhausted.
    pieces = []
    remaining = n
    while remaining > 0:
        summand = randint(1, remaining)
        count = randint(1, remaining//summand)
        pieces.append((summand, count))
        remaining -= summand*count
    pieces.sort(reverse=True)
    # Expand (summand, multiplicity) pairs into the flat partition list.
    return [summand for summand, count in pieces for _ in range(count)]
def RGS_generalized(m):
    """
    Computes the m + 1 generalized unrestricted growth strings
    and returns them as rows in matrix.

    Examples
    ========

    >>> from sympy.combinatorics.partitions import RGS_generalized
    >>> RGS_generalized(6)
    Matrix([
    [  1,   1,   1,  1,  1, 1, 1],
    [  1,   2,   3,  4,  5, 6, 0],
    [  2,   5,  10, 17, 26, 0, 0],
    [  5,  15,  37, 77,  0, 0, 0],
    [ 15,  52, 151,  0,  0, 0, 0],
    [ 52, 203,   0,  0,  0, 0, 0],
    [203,   0,   0,  0,  0, 0, 0]])
    """
    d = zeros(m + 1)
    # Base row: one string of each length.
    for col in range(m + 1):
        d[0, col] = 1

    # Each entry combines the two entries above it; cells past the
    # anti-diagonal stay zero.
    for row in range(1, m + 1):
        for col in range(m):
            if col <= m - row:
                d[row, col] = col * d[row - 1, col] + d[row - 1, col + 1]
            else:
                d[row, col] = 0
    return d
def RGS_enum(m):
    """
    RGS_enum computes the total number of restricted growth strings
    possible for a superset of size m.

    Examples
    ========

    >>> from sympy.combinatorics.partitions import RGS_enum
    >>> from sympy.combinatorics import Partition
    >>> RGS_enum(4)
    15
    >>> RGS_enum(5)
    52
    >>> RGS_enum(6)
    203

    We can check that the enumeration is correct by actually generating
    the partitions. Here, the 15 partitions of 4 items are generated:

    >>> a = Partition(list(range(4)))
    >>> s = set()
    >>> for i in range(20):
    ...     s.add(a)
    ...     a += 1
    ...
    >>> assert len(s) == 15
    """
    # Guard clauses for the degenerate sizes; otherwise the count is the
    # m-th Bell number.
    if m < 1:
        return 0
    if m == 1:
        return 1
    return bell(m)
def RGS_unrank(rank, m):
    """
    Gives the unranked restricted growth string for a given
    superset size.

    Examples
    ========

    >>> from sympy.combinatorics.partitions import RGS_unrank
    >>> RGS_unrank(14, 4)
    [0, 1, 2, 3]
    >>> RGS_unrank(0, 4)
    [0, 0, 0, 0]
    """
    if m < 1:
        raise ValueError("The superset size must be >= 1")
    if rank < 0 or RGS_enum(m) <= rank:
        raise ValueError("Invalid arguments")

    L = [1] * (m + 1)
    j = 1  # running maximum label used so far
    D = RGS_generalized(m)
    for i in range(2, m + 1):
        v = D[m - i, j]
        cr = j*v
        if rank < cr:
            # The remaining rank selects one of the first j labels.
            L[i] = int(rank / v + 1)
            rank %= v
        else:
            # Skip past the j*v strings reusing existing labels and
            # introduce a new label.
            L[i] = j + 1
            rank -= cr
            j += 1
    # Internally labels are 1-based; the result is 0-based.
    return [x - 1 for x in L[1:]]
def RGS_rank(rgs):
    """
    Computes the rank of a restricted growth string.

    Examples
    ========

    >>> from sympy.combinatorics.partitions import RGS_rank, RGS_unrank
    >>> RGS_rank([0, 1, 2, 1, 3])
    42
    >>> RGS_rank(RGS_unrank(4, 7))
    4
    """
    size = len(rgs)
    D = RGS_generalized(size)
    rank = 0
    for i in range(1, size):
        suffix_len = size - (i + 1)   # number of positions after i
        prefix_max = max(rgs[0:i])    # largest label used before i
        rank += D[suffix_len, prefix_max + 1] * rgs[i]
    return rank
|
IntegerPartition
|
python
|
pytorch__pytorch
|
torch/_inductor/template_heuristics/triton.py
|
{
"start": 3865,
"end": 4111
}
|
class ____(FlexBwDConfig):
    """
    ROCm subclass for FlexAttn backward, with AMD backend specific tuneable kernargs
    """

    # AMD-specific kernel arguments; 0 means "let the backend choose".
    # NOTE(review): presumably forwarded to Triton's AMD backend as kernel
    # launch kwargs — confirm against how FlexBwDConfig fields are consumed.
    matrix_instr_nonkdim: int = 0
    waves_per_eu: int = 0
    kpack: int = 2
@dataclasses.dataclass
|
ROCmFlexBwDConfig
|
python
|
numba__numba
|
numba/core/typing/npdatetime.py
|
{
"start": 5346,
"end": 5441
}
|
class ____(TimedeltaOrderedCmpOp):
    # Typing rule for the ``>`` operator; dispatch is keyed on the operator.
    key = operator.gt
@infer_global(operator.ge)
|
TimedeltaCmpGt
|
python
|
pypa__virtualenv
|
src/virtualenv/seed/wheels/util.py
|
{
"start": 3327,
"end": 3962
}
|
class Version:
    """Helpers for interpreting seed-wheel version specifiers.

    Besides concrete version strings, two symbolic handlers are understood:
    the version bundled with virtualenv and the embedded one.
    """

    #: the version bundled with virtualenv
    bundle = "bundle"
    embed = "embed"
    #: custom version handlers
    non_version = (bundle, embed)

    @staticmethod
    def of_version(value):
        """Return ``value`` as a concrete version, or None for symbolic handlers."""
        if value in Version.non_version:
            return None
        return value

    @staticmethod
    def as_pip_req(distribution, version):
        """Render a pip requirement string for ``distribution`` at ``version``."""
        return f"{distribution}{Version.as_version_spec(version)}"

    @staticmethod
    def as_version_spec(version):
        """Render an ``==x.y`` pin, or the empty string for symbolic versions."""
        pinned = Version.of_version(version)
        return f"=={pinned}" if pinned is not None else ""
__all__ = [
"Version",
"Wheel",
"discover_wheels",
]
|
Version
|
python
|
huggingface__transformers
|
tests/models/markuplm/test_tokenization_markuplm.py
|
{
"start": 1218,
"end": 210630
}
|
class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "microsoft/markuplm-base"
tokenizer_class = MarkupLMTokenizer
rust_tokenizer_class = MarkupLMTokenizerFast
test_rust_tokenizer = True
from_pretrained_kwargs = {"cls_token": "<s>"}
test_seq2seq = False
input_text = "Hello😊 <s>intro</s> falsé-world! 生活的真谛"
integration_expected_tokens = ['Hello', 'ðŁĺ', 'Ĭ', 'Ġ', '<s>', 'int', 'ro', '</s>', 'Ġfals', 'é', '-', 'world', '!', 'Ġç', 'Ķ', 'Ł', 'æ', '´', '»', 'çļĦ', 'çľ', 'Ł', 'è', '°', 'Ľ'] # fmt: skip
integration_expected_token_ids = [31414, 18636, 27969, 0, 2544, 1001, 2, 506, 1536, 1140, 12, 8331, 328, 48998, 37127, 20024, 2023, 44574, 49122, 4333, 36484, 7487, 3726] # fmt: skip
expected_tokens_from_ids = ['Hello', 'ðŁĺ', 'Ĭ', '<s>', 'int', 'ro', '</s>', 'f', 'als', 'é', '-', 'world', '!', 'çĶŁ', 'æ', '´', '»', 'çļĦ', 'çľ', 'Ł', 'è', '°', 'Ľ'] # fmt: skip
integration_expected_decoded_text = "Hello😊<s>intro</s>falsé-world!生活的真谛"
text_from_tokens = "Hello😊 <s>intro</s> falsé-world! 生活的真谛"
@classmethod
def setUpClass(cls):
super().setUpClass()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "\u0120hello", "\u0120world", "<unk>",] # fmt: skip
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
cls.tags_dict = {"a": 0, "abbr": 1, "acronym": 2, "address": 3}
cls.special_tokens_map = {"unk_token": "<unk>"}
cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
cls.merges_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
cls.tokenizer_config_file = os.path.join(cls.tmpdirname, "tokenizer_config.json")
with open(cls.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(cls.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
with open(cls.tokenizer_config_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps({"tags_dict": cls.tags_dict}))
def _run_integration_checks(self, tokenizer, tokenizer_type):
tokens = tokenizer.tokenize(self.input_text)
self.assertEqual(
tokens,
self.integration_expected_tokens,
f"Tokenized tokens don't match expected for {tokenizer.__class__.__name__} ({tokenizer_type})",
)
ids = tokenizer.encode(self.input_text, add_special_tokens=False)
self.assertEqual(
ids,
self.integration_expected_token_ids,
f"Encoded IDs don't match expected for {tokenizer.__class__.__name__} ({tokenizer_type})",
)
decoded_text = tokenizer.decode(self.integration_expected_token_ids, clean_up_tokenization_spaces=False)
self.assertEqual(
decoded_text,
self.integration_expected_decoded_text,
f"Decoded text doesn't match expected for {tokenizer.__class__.__name__} ({tokenizer_type})",
)
tokens_from_ids = tokenizer.convert_ids_to_tokens(self.integration_expected_token_ids)
self.assertEqual(
tokens_from_ids,
self.expected_tokens_from_ids,
f"Tokens from IDs don't match expected for {tokenizer.__class__.__name__} ({tokenizer_type})",
)
def get_nodes_and_xpaths(self):
nodes = ["hello", "world"]
xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"]
return nodes, xpaths
def get_nodes_and_xpaths_batch(self):
nodes = [["hello world", "running"], ["hello my name is bob"]]
xpaths = [
["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"],
["/html/body/div/li[2]/div/span"],
]
return nodes, xpaths
def get_question_nodes_and_xpaths(self):
question = "what's his name?"
nodes = ["hello world"]
xpaths = ["/html/body/div/li[1]/div/span"] # , "/html/body/div/li[1]/div/span"]
return question, nodes, xpaths
def get_extracted_tokenizer(self, reference_tokenizer=None):
if reference_tokenizer is None:
reference_tokenizer = self.get_tokenizer()
try:
tokenizer_json_path = os.path.join(self.tmpdirname, "tokenizer.json")
if not os.path.exists(tokenizer_json_path):
return None
extractor = TokenizersExtractor(tokenizer_json_path)
vocab_ids, vocab_scores, merges, added_tokens_decoder = extractor.extract()
init_kwargs = {
"vocab": vocab_scores,
"merges": merges,
"do_lower_case": False,
"keep_accents": True,
"added_tokens_decoder": dict(added_tokens_decoder.items()),
}
tags_dict = getattr(reference_tokenizer, "tags_dict", None)
if tags_dict is None:
raise ValueError("MarkupLMTokenizer requires a tags_dict for initialization.")
init_kwargs["tags_dict"] = tags_dict
if self.from_pretrained_kwargs is not None:
init_kwargs.update(self.from_pretrained_kwargs)
return self.tokenizer_class(**init_kwargs)
except (TypeError, Exception):
raise
def get_question_nodes_and_xpaths_batch(self):
questions = ["what's his name?", "how is he called?"]
nodes = [["hello world", "running"], ["hello my name is bob"]]
xpaths = [
["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"],
["/html/body/div/li[2]/div/span"],
]
return questions, nodes, xpaths
def get_empty_nodes_and_xpaths(self):
nodes = ["test", "empty", ""]
xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"]
return nodes, xpaths
def get_empty_nodes_and_xpaths_batch(self):
nodes = [["test", "empty", ""], ["one", "more", "empty", ""]]
xpaths = [
["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"],
[
"/html/body/div/li[2]/div/span",
"/html/body/div/li[2]/div/span",
"/html/body/div/li[2]/div/span",
"/html/body/div/li[2]/div/span",
],
]
return nodes, xpaths
def get_empty_question_nodes_and_xpaths(self):
question = ""
nodes = ["test", "empty", ""]
xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"]
return question, nodes, xpaths
def get_empty_question_nodes_and_xpaths_batch(self):
questions = ["what's his name?", ""]
nodes = [["test", "empty", ""], ["one", "more", "empty", ""]]
xpaths = [
["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"],
[
"/html/body/div/li[2]/div/span",
"/html/body/div/li[2]/div/span",
"/html/body/div/li[2]/div/span",
"/html/body/div/li[2]/div/span",
],
]
return questions, nodes, xpaths
@unittest.skip(reason="Chat template tests don't play well with table/layout models.")
def test_chat_template_batched(self):
pass
def get_input_output_texts(self, tokenizer):
input_text = "UNwant\u00e9d,running"
output_text = "unwanted, running"
return input_text, output_text
def convert_batch_encode_plus_format_to_encode_plus(self, batch_encode_plus_sequences):
first_key = next(iter(batch_encode_plus_sequences))
batch_size = len(batch_encode_plus_sequences[first_key])
encode_plus_sequences = []
for i in range(batch_size):
single = {}
for key, value in batch_encode_plus_sequences.items():
if key != "encodings":
single[key] = value[i]
encode_plus_sequences.append(single)
return encode_plus_sequences
def test_add_special_tokens(self):
tokenizers: list[MarkupLMTokenizer] = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
special_token = "[SPECIAL_TOKEN]"
special_token_xpath = "/html/body/div/li[1]/div/span"
tokenizer.add_special_tokens({"cls_token": special_token})
encoded_special_token = tokenizer.encode(
[special_token], xpaths=[special_token_xpath], add_special_tokens=False
)
self.assertEqual(len(encoded_special_token), 1)
decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
self.assertTrue(special_token not in decoded)
def test_add_tokens_tokenizer(self):
tokenizers: list[MarkupLMTokenizer] = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
vocab_size = tokenizer.vocab_size
all_size = len(tokenizer)
self.assertNotEqual(vocab_size, 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
new_toks = [
AddedToken("aaaaa", rstrip=True, lstrip=True),
AddedToken("bbbbbb", rstrip=True, lstrip=True),
AddedToken("cccccccccdddddddd", rstrip=True, lstrip=True),
]
added_toks = tokenizer.add_tokens(new_toks)
vocab_size_2 = tokenizer.vocab_size
all_size_2 = len(tokenizer)
self.assertNotEqual(vocab_size_2, 0)
self.assertEqual(vocab_size + 3, vocab_size_2 + 3)
self.assertEqual(added_toks, len(new_toks))
self.assertEqual(all_size_2, all_size + len(new_toks))
nodes = "aaaaa bbbbbb low cccccccccdddddddd l".split()
xpaths = ["/html/body/div/li[1]/div/span" for _ in range(len(nodes))]
tokens = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False)
self.assertGreaterEqual(len(tokens), 4)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
vocab_size_3 = tokenizer.vocab_size
all_size_3 = len(tokenizer)
self.assertNotEqual(vocab_size_3, 0)
self.assertEqual(vocab_size, vocab_size_3)
self.assertEqual(added_toks_2, len(new_toks_2))
self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
nodes = ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l".split()
xpaths = ["/html/body/div/li[1]/div/span" for _ in range(len(nodes))]
tokens = tokenizer.encode(
nodes,
xpaths=xpaths,
add_special_tokens=False,
)
self.assertGreaterEqual(len(tokens), 6)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[0], tokens[1])
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokens[-3])
self.assertEqual(tokens[0], tokenizer.eos_token_id)
self.assertEqual(tokens[-2], tokenizer.pad_token_id)
@require_tokenizers
def test_encode_decode_with_spaces(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
nodes, xpaths = self.get_nodes_and_xpaths()
new_toks = [AddedToken("[ABC]", normalized=False), AddedToken("[DEF]", normalized=False)]
tokenizer.add_tokens(new_toks)
input = "[ABC][DEF][ABC][DEF]"
if self.space_between_special_tokens:
output = "[ABC] [DEF] [ABC] [DEF]"
else:
output = input
encoded = tokenizer.encode(input.split(), xpaths=xpaths, add_special_tokens=False)
decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
self.assertIn(decoded, [output, output.lower()])
@unittest.skip(reason="Not implemented")
def test_right_and_left_truncation(self):
pass
@parameterized.expand([(True,), (False,)])
def test_encode_plus_with_padding(self, use_padding_as_call_kwarg: bool):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
nodes, xpaths = self.get_nodes_and_xpaths()
# check correct behaviour if no pad_token_id exists and add it eventually
self._check_no_pad_token_padding(tokenizer, nodes)
padding_size = 10
padding_idx = tokenizer.pad_token_id
encoded_sequence = tokenizer.encode_plus(nodes, xpaths=xpaths, return_special_tokens_mask=True)
input_ids = encoded_sequence["input_ids"]
special_tokens_mask = encoded_sequence["special_tokens_mask"]
sequence_length = len(input_ids)
# Test 'longest' and 'no_padding' don't do anything
tokenizer.padding_side = "right"
not_padded_sequence = tokenizer.encode_plus(
nodes,
xpaths=xpaths,
padding=False,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
self.assertTrue(sequence_length == not_padded_sequence_length)
self.assertTrue(input_ids == not_padded_input_ids)
self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask)
not_padded_sequence = tokenizer.encode_plus(
nodes,
xpaths=xpaths,
padding=False,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
self.assertTrue(sequence_length == not_padded_sequence_length)
self.assertTrue(input_ids == not_padded_input_ids)
self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask)
# Test right padding
tokenizer_kwargs_right = {
"max_length": sequence_length + padding_size,
"padding": "max_length",
"return_special_tokens_mask": True,
}
if not use_padding_as_call_kwarg:
tokenizer.padding_side = "right"
else:
tokenizer_kwargs_right["padding_side"] = "right"
right_padded_sequence = tokenizer.encode_plus(nodes, xpaths=xpaths, **tokenizer_kwargs_right)
right_padded_input_ids = right_padded_sequence["input_ids"]
right_padded_special_tokens_mask = right_padded_sequence["special_tokens_mask"]
right_padded_sequence_length = len(right_padded_input_ids)
self.assertTrue(sequence_length + padding_size == right_padded_sequence_length)
self.assertTrue(input_ids + [padding_idx] * padding_size == right_padded_input_ids)
self.assertTrue(special_tokens_mask + [1] * padding_size == right_padded_special_tokens_mask)
# Test left padding
tokenizer_kwargs_left = {
"max_length": sequence_length + padding_size,
"padding": "max_length",
"return_special_tokens_mask": True,
}
if not use_padding_as_call_kwarg:
tokenizer.padding_side = "left"
else:
tokenizer_kwargs_left["padding_side"] = "left"
left_padded_sequence = tokenizer.encode_plus(nodes, xpaths=xpaths, **tokenizer_kwargs_left)
left_padded_input_ids = left_padded_sequence["input_ids"]
left_padded_special_tokens_mask = left_padded_sequence["special_tokens_mask"]
left_padded_sequence_length = len(left_padded_input_ids)
self.assertTrue(sequence_length + padding_size == left_padded_sequence_length)
self.assertTrue([padding_idx] * padding_size + input_ids == left_padded_input_ids)
self.assertTrue([1] * padding_size + special_tokens_mask == left_padded_special_tokens_mask)
if "token_type_ids" in tokenizer.model_input_names:
token_type_ids = encoded_sequence["token_type_ids"]
left_padded_token_type_ids = left_padded_sequence["token_type_ids"]
right_padded_token_type_ids = right_padded_sequence["token_type_ids"]
assert token_type_ids + [0] * padding_size == right_padded_token_type_ids
assert [0] * padding_size + token_type_ids == left_padded_token_type_ids
if "attention_mask" in tokenizer.model_input_names:
attention_mask = encoded_sequence["attention_mask"]
right_padded_attention_mask = right_padded_sequence["attention_mask"]
left_padded_attention_mask = left_padded_sequence["attention_mask"]
self.assertTrue(attention_mask + [0] * padding_size == right_padded_attention_mask)
self.assertTrue([0] * padding_size + attention_mask == left_padded_attention_mask)
def test_internal_consistency(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
nodes, xpaths = self.get_nodes_and_xpaths()
tokens = []
for word in nodes:
tokens.extend(tokenizer.tokenize(word))
ids = tokenizer.convert_tokens_to_ids(tokens)
ids_2 = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False)
self.assertListEqual(ids, ids_2)
tokens_2 = tokenizer.convert_ids_to_tokens(ids)
self.assertNotEqual(len(tokens_2), 0)
text_2 = tokenizer.decode(ids)
self.assertIsInstance(text_2, str)
def test_mask_output(self):
tokenizers = self.get_tokenizers(fast=False, do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
nodes, xpaths = self.get_nodes_and_xpaths()
if (
tokenizer.build_inputs_with_special_tokens.__qualname__.split(".")[0] != "PreTrainedTokenizer"
and "token_type_ids" in tokenizer.model_input_names
):
information = tokenizer.encode_plus(nodes, xpaths=xpaths, add_special_tokens=True)
sequences, mask = information["input_ids"], information["token_type_ids"]
self.assertEqual(len(sequences), len(mask))
def test_number_of_added_tokens(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# test 1: single sequence
nodes, xpaths = self.get_nodes_and_xpaths()
sequences = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False)
attached_sequences = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=True)
# Method is implemented (e.g. not GPT-2)
if len(attached_sequences) != 2:
self.assertEqual(
tokenizer.num_special_tokens_to_add(pair=False), len(attached_sequences) - len(sequences)
)
# test 2: two sequences
question, nodes, xpaths = self.get_question_nodes_and_xpaths()
sequences = tokenizer.encode(question, nodes, xpaths=xpaths, add_special_tokens=False)
attached_sequences = tokenizer.encode(question, nodes, xpaths=xpaths, add_special_tokens=True)
# Method is implemented (e.g. not GPT-2)
if len(attached_sequences) != 2:
self.assertEqual(
tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences)
)
def test_padding(self, max_length=50):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.get_tokenizer(pretrained_name, **kwargs)
tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs)
self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id)
pad_token_id = tokenizer_p.pad_token_id
# Encode - Simple input
nodes, xpaths = self.get_nodes_and_xpaths()
input_r = tokenizer_r.encode(nodes, xpaths=xpaths, max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode(nodes, xpaths=xpaths, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode(nodes, xpaths=xpaths, padding="longest")
input_p = tokenizer_p.encode(nodes, xpaths=xpaths, padding=True)
self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
# Encode - Pair input
question, nodes, xpaths = self.get_question_nodes_and_xpaths()
input_r = tokenizer_r.encode(
question, nodes, xpaths=xpaths, max_length=max_length, padding="max_length"
)
input_p = tokenizer_p.encode(
question, nodes, xpaths=xpaths, max_length=max_length, padding="max_length"
)
self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.encode(question, nodes, xpaths=xpaths, padding=True)
input_p = tokenizer_p.encode(question, nodes, xpaths=xpaths, padding="longest")
self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id)
# Encode_plus - Simple input
nodes, xpaths = self.get_nodes_and_xpaths()
input_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths, max_length=max_length, padding="max_length")
input_p = tokenizer_p.encode_plus(nodes, xpaths=xpaths, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths, padding="longest")
input_p = tokenizer_p.encode_plus(nodes, xpaths=xpaths, padding=True)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
# Encode_plus - Pair input
question, nodes, xpaths = self.get_question_nodes_and_xpaths()
input_r = tokenizer_r.encode_plus(
question, nodes, xpaths=xpaths, max_length=max_length, padding="max_length"
)
input_p = tokenizer_p.encode_plus(
question, nodes, xpaths=xpaths, max_length=max_length, padding="max_length"
)
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
input_r = tokenizer_r.encode_plus(question, nodes, xpaths=xpaths, padding="longest")
input_p = tokenizer_p.encode_plus(question, nodes, xpaths=xpaths, padding=True)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"])
# Batch_encode_plus - Simple input
nodes, xpaths = self.get_nodes_and_xpaths_batch()
input_r = tokenizer_r.batch_encode_plus(
nodes,
xpaths=xpaths,
max_length=max_length,
padding="max_length",
)
input_p = tokenizer_p.batch_encode_plus(
nodes,
xpaths=xpaths,
max_length=max_length,
padding="max_length",
)
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
nodes,
xpaths=xpaths,
max_length=max_length,
padding="longest",
)
input_p = tokenizer_p.batch_encode_plus(
nodes,
xpaths=xpaths,
max_length=max_length,
padding=True,
)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
input_r = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths, padding="longest")
input_p = tokenizer_p.batch_encode_plus(nodes, xpaths=xpaths, padding=True)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
# Batch_encode_plus - Pair input
questions, nodes, xpaths = self.get_question_nodes_and_xpaths_batch()
input_r = tokenizer_r.batch_encode_plus(
list(zip(questions, nodes)),
is_pair=True,
xpaths=xpaths,
max_length=max_length,
truncation=True,
padding="max_length",
)
input_p = tokenizer_p.batch_encode_plus(
list(zip(questions, nodes)),
is_pair=True,
xpaths=xpaths,
max_length=max_length,
truncation=True,
padding="max_length",
)
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
input_r = tokenizer_r.batch_encode_plus(
list(zip(questions, nodes)),
is_pair=True,
xpaths=xpaths,
padding=True,
)
input_p = tokenizer_p.batch_encode_plus(
list(zip(questions, nodes)),
is_pair=True,
xpaths=xpaths,
padding="longest",
)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
# Using pad on single examples after tokenization
nodes, xpaths = self.get_nodes_and_xpaths()
input_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths)
input_r = tokenizer_r.pad(input_r)
input_p = tokenizer_r.encode_plus(nodes, xpaths=xpaths)
input_p = tokenizer_r.pad(input_p)
self.assert_padded_input_match(
input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id
)
# Using pad on single examples after tokenization
input_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths)
input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")
input_p = tokenizer_r.encode_plus(nodes, xpaths=xpaths)
input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length")
self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id)
# Using pad after tokenization
nodes, xpaths = self.get_nodes_and_xpaths_batch()
input_r = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths)
input_r = tokenizer_r.pad(input_r)
input_p = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths)
input_p = tokenizer_r.pad(input_p)
self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id)
# Using pad after tokenization
nodes, xpaths = self.get_nodes_and_xpaths_batch()
input_r = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths)
input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length")
input_p = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths)
input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length")
self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id)
def test_call(self):
    """Check that ``tokenizer(...)`` wraps ``encode_plus``/``batch_encode_plus``.

    For single inputs, question/nodes pairs, and batched inputs, calling the
    tokenizer directly must produce exactly the same encoding as the explicit
    ``encode_plus`` / ``batch_encode_plus`` entry points.
    """
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            # Test not batched
            nodes, xpaths = self.get_nodes_and_xpaths()
            encoded_sequences_1 = tokenizer.encode_plus(nodes, xpaths=xpaths)
            encoded_sequences_2 = tokenizer(nodes, xpaths=xpaths)
            self.assertEqual(encoded_sequences_1, encoded_sequences_2)
            # Test not batched pairs.
            # Bug fix: the question was fetched but never passed, so this case
            # previously duplicated the single-sequence check instead of
            # exercising the pair path (same call shape as test_token_type_ids).
            question, nodes, xpaths = self.get_question_nodes_and_xpaths()
            encoded_sequences_1 = tokenizer.encode_plus(question, nodes, xpaths=xpaths)
            encoded_sequences_2 = tokenizer(question, nodes, xpaths=xpaths)
            self.assertEqual(encoded_sequences_1, encoded_sequences_2)
            # Test batched
            nodes, xpaths = self.get_nodes_and_xpaths_batch()
            encoded_sequences_1 = tokenizer.batch_encode_plus(nodes, is_pair=False, xpaths=xpaths)
            encoded_sequences_2 = tokenizer(nodes, xpaths=xpaths)
            self.assertEqual(encoded_sequences_1, encoded_sequences_2)
def test_batch_encode_plus_batch_sequence_length(self):
    """``batch_encode_plus`` must match per-example ``encode_plus`` results.

    Checks: the unpadded batch equals the per-example encodings; ``padding=True``
    equals per-example ``padding="max_length"`` at the batch maximum;
    ``padding="longest"`` ignores a larger ``max_length``; and ``padding=False``
    ignores ``max_length`` entirely.
    """
    # Tests that all encoded values have the correct size
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            nodes, xpaths = self.get_nodes_and_xpaths_batch()
            # Reference: encode every example individually, without padding.
            encoded_sequences = [
                tokenizer.encode_plus(nodes_example, xpaths=xpaths_example)
                for nodes_example, xpaths_example in zip(nodes, xpaths)
            ]
            encoded_sequences_batch = tokenizer.batch_encode_plus(
                nodes, is_pair=False, xpaths=xpaths, padding=False
            )
            self.assertListEqual(
                encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
            )
            # Length of the longest example — the target of padding=True below.
            maximum_length = len(
                max([encoded_sequence["input_ids"] for encoded_sequence in encoded_sequences], key=len)
            )
            # check correct behaviour if no pad_token_id exists and add it eventually
            self._check_no_pad_token_padding(tokenizer, nodes)
            encoded_sequences_padded = [
                tokenizer.encode_plus(
                    nodes_example, xpaths=xpaths_example, max_length=maximum_length, padding="max_length"
                )
                for nodes_example, xpaths_example in zip(nodes, xpaths)
            ]
            encoded_sequences_batch_padded = tokenizer.batch_encode_plus(
                nodes, is_pair=False, xpaths=xpaths, padding=True
            )
            self.assertListEqual(
                encoded_sequences_padded,
                self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded),
            )
            # check 'longest' is unsensitive to a max length
            encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(
                nodes, is_pair=False, xpaths=xpaths, padding=True
            )
            encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
                nodes, is_pair=False, xpaths=xpaths, max_length=maximum_length + 10, padding="longest"
            )
            for key in encoded_sequences_batch_padded_1:
                self.assertListEqual(
                    encoded_sequences_batch_padded_1[key],
                    encoded_sequences_batch_padded_2[key],
                )
            # check 'no_padding' is unsensitive to a max length
            encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(
                nodes, is_pair=False, xpaths=xpaths, padding=False
            )
            encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
                nodes, is_pair=False, xpaths=xpaths, max_length=maximum_length + 10, padding=False
            )
            for key in encoded_sequences_batch_padded_1:
                self.assertListEqual(
                    encoded_sequences_batch_padded_1[key],
                    encoded_sequences_batch_padded_2[key],
                )
# Disabled: the common overflow test is not applicable here (see skip reason).
@unittest.skip(reason="batch_encode_plus does not handle overflowing tokens.")
def test_batch_encode_plus_overflowing_tokens(self):
    pass
def test_batch_encode_plus_padding(self):
    """Padded batch encodings must equal per-example ``padding="max_length"``.

    Runs once with the tokenizer's default padding side and once with
    ``padding_side = "left"``.
    """
    # Test that padded sequences are equivalent between batch_encode_plus and encode_plus
    # Right padding tests
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            nodes, xpaths = self.get_nodes_and_xpaths_batch()
            max_length = 100
            # check correct behaviour if no pad_token_id exists and add it eventually
            self._check_no_pad_token_padding(tokenizer, nodes)
            # Reference: every example padded individually to max_length.
            encoded_sequences = [
                tokenizer.encode_plus(
                    nodes_example, xpaths=xpaths_example, max_length=max_length, padding="max_length"
                )
                for nodes_example, xpaths_example in zip(nodes, xpaths)
            ]
            encoded_sequences_batch = tokenizer.batch_encode_plus(
                nodes, is_pair=False, xpaths=xpaths, max_length=max_length, padding="max_length"
            )
            self.assertListEqual(
                encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
            )
    # Left padding tests
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            tokenizer.padding_side = "left"
            nodes, xpaths = self.get_nodes_and_xpaths_batch()
            max_length = 100
            # check correct behaviour if no pad_token_id exists and add it eventually
            self._check_no_pad_token_padding(tokenizer, nodes)
            encoded_sequences = [
                tokenizer.encode_plus(
                    nodes_example, xpaths=xpaths_example, max_length=max_length, padding="max_length"
                )
                for nodes_example, xpaths_example in zip(nodes, xpaths)
            ]
            encoded_sequences_batch = tokenizer.batch_encode_plus(
                nodes, is_pair=False, xpaths=xpaths, max_length=max_length, padding="max_length"
            )
            self.assertListEqual(
                encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
            )
def test_padding_to_multiple_of(self):
    """Verify ``pad_to_multiple_of=8`` rounds padded lengths up to a multiple of 8.

    Without ``padding=True`` the flag alone must not pad; with truncation it must
    still pad to a multiple of 8; truncating to a non-multiple raises ValueError.
    """
    for tokenizer in self.get_tokenizers():
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            # skipTest raises, so the guard clause replaces the original else-branch.
            if tokenizer.pad_token is None:
                self.skipTest(reason="No padding token.")
            nodes, xpaths = self.get_nodes_and_xpaths()
            padded = tokenizer(nodes, xpaths=xpaths, padding=True, pad_to_multiple_of=8)
            for key, value in padded.items():
                self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
            # pad_to_multiple_of without padding=True must leave lengths untouched.
            unpadded = tokenizer(nodes, xpaths=xpaths, pad_to_multiple_of=8)
            for key, value in unpadded.items():
                self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
            # Should also work with truncation enabled.
            truncated = tokenizer(
                nodes, xpaths=xpaths, padding=True, truncation=True, pad_to_multiple_of=8
            )
            for key, value in truncated.items():
                self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
            # Truncating to a length that is not a multiple of pad_to_multiple_of must raise.
            self.assertRaises(
                ValueError,
                tokenizer.__call__,
                nodes,
                xpaths=xpaths,
                padding=True,
                truncation=True,
                max_length=12,
                pad_to_multiple_of=8,
            )
def test_tokenizer_slow_store_full_signature(self):
    """Every defaulted ``__init__`` parameter must be recorded in ``init_kwargs``."""
    signature = inspect.signature(self.tokenizer_class.__init__)
    tokenizer = self.get_tokenizer()
    for name, param in signature.parameters.items():
        if param.default is inspect.Parameter.empty:
            continue
        self.assertIn(name, tokenizer.init_kwargs)
def test_special_tokens_mask_input_pairs(self):
    """The special-tokens mask must flag exactly the added special tokens.

    Stripping the positions flagged by ``special_tokens_mask`` from an encoding
    with special tokens must recover the encoding produced with
    ``add_special_tokens=False``.
    """
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            nodes, xpaths = self.get_nodes_and_xpaths()
            plain_ids = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False)
            encoding = tokenizer.encode_plus(
                nodes,
                xpaths=xpaths,
                add_special_tokens=True,
                return_special_tokens_mask=True,
                # add_prefix_space=False,
            )
            ids_with_special = encoding["input_ids"]
            mask = encoding["special_tokens_mask"]
            self.assertEqual(len(mask), len(ids_with_special))
            # Drop every position the mask flags as special.
            stripped = [tok for tok, is_special in zip(ids_with_special, mask) if not is_special]
            self.assertEqual(plain_ids, stripped)
def test_special_tokens_mask(self):
    """Filtering by ``special_tokens_mask`` recovers the un-specialized encoding."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            nodes, xpaths = self.get_nodes_and_xpaths()
            # Single input: encode once without and once with special tokens.
            plain_ids = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False)
            encoding = tokenizer.encode_plus(
                nodes, xpaths=xpaths, add_special_tokens=True, return_special_tokens_mask=True
            )
            ids_with_special = encoding["input_ids"]
            mask = encoding["special_tokens_mask"]
            self.assertEqual(len(mask), len(ids_with_special))
            stripped = [tok for tok, flag in zip(ids_with_special, mask) if not flag]
            self.assertEqual(plain_ids, stripped)
def test_save_and_load_tokenizer(self):
    """Round-trip ``save_pretrained``/``from_pretrained`` and check equality.

    Encoding the same nodes and the vocabulary must be identical before and
    after saving and reloading the tokenizer.
    """
    # safety check on max_len default value so we are sure the test works
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            self.assertNotEqual(tokenizer.model_max_length, 42)
    # Now let's start the test
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            # Isolate this from the other tests because we save additional tokens/etc
            nodes, xpaths = self.get_nodes_and_xpaths()
            # Robustness fix: TemporaryDirectory guarantees cleanup even when an
            # assertion fails (the original mkdtemp/rmtree pair leaked the
            # directory on failure).
            with tempfile.TemporaryDirectory() as tmpdirname:
                before_tokens = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False)
                before_vocab = tokenizer.get_vocab()
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False)
                after_vocab = after_tokenizer.get_vocab()
                self.assertListEqual(before_tokens, after_tokens)
                self.assertDictEqual(before_vocab, after_vocab)
def test_right_and_left_padding(self):
    """``padding="max_length"`` must pad on the configured side.

    Also checks that ``padding=True``/``"longest"``/``False`` leave a single
    example unpadded regardless of the padding side.
    """
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            nodes, xpaths = self.get_nodes_and_xpaths()
            sequence = "Sequence"
            padding_size = 10
            # check correct behaviour if no pad_token_id exists and add it eventually
            self._check_no_pad_token_padding(tokenizer, sequence)
            padding_idx = tokenizer.pad_token_id
            # RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
            tokenizer.padding_side = "right"
            encoded_sequence = tokenizer.encode(nodes, xpaths=xpaths)
            sequence_length = len(encoded_sequence)
            padded_sequence = tokenizer.encode(
                nodes, xpaths=xpaths, max_length=sequence_length + padding_size, padding="max_length"
            )
            padded_sequence_length = len(padded_sequence)
            assert sequence_length + padding_size == padded_sequence_length
            # Pad tokens must be appended on the right.
            assert encoded_sequence + [padding_idx] * padding_size == padded_sequence
            # LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
            tokenizer.padding_side = "left"
            encoded_sequence = tokenizer.encode(nodes, xpaths=xpaths)
            sequence_length = len(encoded_sequence)
            padded_sequence = tokenizer.encode(
                nodes, xpaths=xpaths, max_length=sequence_length + padding_size, padding="max_length"
            )
            padded_sequence_length = len(padded_sequence)
            assert sequence_length + padding_size == padded_sequence_length
            # Pad tokens must be prepended on the left.
            assert [padding_idx] * padding_size + encoded_sequence == padded_sequence
            # RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding'
            encoded_sequence = tokenizer.encode(nodes, xpaths=xpaths)
            sequence_length = len(encoded_sequence)
            tokenizer.padding_side = "right"
            padded_sequence_right = tokenizer.encode(nodes, xpaths=xpaths, padding=True)
            padded_sequence_right_length = len(padded_sequence_right)
            assert sequence_length == padded_sequence_right_length
            assert encoded_sequence == padded_sequence_right
            tokenizer.padding_side = "left"
            padded_sequence_left = tokenizer.encode(nodes, xpaths=xpaths, padding="longest")
            padded_sequence_left_length = len(padded_sequence_left)
            assert sequence_length == padded_sequence_left_length
            assert encoded_sequence == padded_sequence_left
            tokenizer.padding_side = "right"
            padded_sequence_right = tokenizer.encode(nodes, xpaths=xpaths)
            padded_sequence_right_length = len(padded_sequence_right)
            assert sequence_length == padded_sequence_right_length
            assert encoded_sequence == padded_sequence_right
            tokenizer.padding_side = "left"
            padded_sequence_left = tokenizer.encode(nodes, xpaths=xpaths, padding=False)
            padded_sequence_left_length = len(padded_sequence_left)
            assert sequence_length == padded_sequence_left_length
            assert encoded_sequence == padded_sequence_left
def test_token_type_ids(self):
    """token_type_ids must align with input_ids/attention_mask lengths.

    A single sequence may only contain segment id 0; a question/nodes pair must
    still contain segment id 0.
    """
    for tokenizer in self.get_tokenizers():
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            # test 1: single sequence
            nodes, xpaths = self.get_nodes_and_xpaths()
            output = tokenizer(nodes, xpaths=xpaths, return_token_type_ids=True)
            token_type_ids = output["token_type_ids"]
            # Same length as the input IDs and the attention mask.
            self.assertEqual(len(token_type_ids), len(output["input_ids"]))
            self.assertEqual(len(token_type_ids), len(output["attention_mask"]))
            self.assertIn(0, token_type_ids)
            self.assertNotIn(1, token_type_ids)
            # test 2: two sequences (question + nodes)
            question, nodes, xpaths = self.get_question_nodes_and_xpaths()
            output = tokenizer(question, nodes, xpaths, return_token_type_ids=True)
            token_type_ids = output["token_type_ids"]
            self.assertEqual(len(token_type_ids), len(output["input_ids"]))
            self.assertEqual(len(token_type_ids), len(output["attention_mask"]))
            self.assertIn(0, token_type_ids)
def test_offsets_mapping(self):
    """Fast tokenizer must return one offset pair per token, with special tokens counted.

    Covers both the single-sequence and the (question, nodes) pair form.
    """
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
            tokenizer_r = self.get_tokenizer(pretrained_name, **kwargs)
            text = ["a", "wonderful", "test"]
            xpaths = ["html/body" for _ in range(len(text))]
            # No pair
            tokens_with_offsets = tokenizer_r.encode_plus(
                text,
                xpaths=xpaths,
                return_special_tokens_mask=True,
                return_offsets_mapping=True,
                add_special_tokens=True,
            )
            added_tokens = tokenizer_r.num_special_tokens_to_add(False)
            offsets = tokens_with_offsets["offset_mapping"]
            # Assert there is the same number of tokens and offsets
            self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
            # Assert there are only `added_tokens` special tokens in the mask
            self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
            # Pairs
            text = "what's his name"
            pair = ["a", "wonderful", "test"]
            xpaths = ["html/body" for _ in range(len(pair))]
            tokens_with_offsets = tokenizer_r.encode_plus(
                text,
                pair,
                xpaths=xpaths,
                return_special_tokens_mask=True,
                return_offsets_mapping=True,
                add_special_tokens=True,
            )
            added_tokens = tokenizer_r.num_special_tokens_to_add(True)
            offsets = tokens_with_offsets["offset_mapping"]
            # Assert there is the same number of tokens and offsets
            self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"]))
            # Assert there are only `added_tokens` special tokens in the mask
            self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens)
def test_embedded_special_tokens(self):
    """Rust and Python tokenizers must agree on encodings that include special tokens."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.get_tokenizer(pretrained_name, **kwargs)
            nodes, xpaths = self.get_nodes_and_xpaths()
            tokens_r = tokenizer_r(nodes, xpaths=xpaths, add_special_tokens=True)
            tokens_p = tokenizer_p(nodes, xpaths=xpaths, add_special_tokens=True)
            # Every field produced by the Python tokenizer must match the fast one.
            for key in tokens_p:
                self.assertEqual(tokens_r[key], tokens_p[key])
            if "token_type_ids" in tokens_r:
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
            # The decoded token strings must be identical as well.
            decoded_r = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
            decoded_p = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
            self.assertSequenceEqual(decoded_r, decoded_p)
def test_compare_add_special_tokens(self):
    """``add_special_tokens=True`` must add exactly ``num_special_tokens_to_add`` tokens.

    Verified across ``tokenize``, ``encode``, ``encode_plus`` and
    ``batch_encode_plus`` on the fast tokenizer.
    """
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False)
            nodes, xpaths = self.get_nodes_and_xpaths()
            # tokenize()
            no_special_tokens = tokenizer_r.tokenize(" ".join(nodes), add_special_tokens=False)
            with_special_tokens = tokenizer_r.tokenize(" ".join(nodes), add_special_tokens=True)
            self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)
            # encode()
            no_special_tokens = tokenizer_r.encode(nodes, xpaths=xpaths, add_special_tokens=False)
            with_special_tokens = tokenizer_r.encode(nodes, xpaths=xpaths, add_special_tokens=True)
            self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add)
            # encode_plus(): every returned field must grow by the same amount.
            no_special_tokens = tokenizer_r.encode_plus(nodes, xpaths=xpaths, add_special_tokens=False)
            with_special_tokens = tokenizer_r.encode_plus(nodes, xpaths=xpaths, add_special_tokens=True)
            for key in no_special_tokens:
                self.assertEqual(
                    len(no_special_tokens[key]),
                    len(with_special_tokens[key]) - simple_num_special_tokens_to_add,
                )
            # batch_encode_plus(): check each example of each field.
            nodes, xpaths = self.get_nodes_and_xpaths_batch()
            no_special_tokens = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths, add_special_tokens=False)
            with_special_tokens = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths, add_special_tokens=True)
            for key in no_special_tokens:
                for i_no, i_with in zip(no_special_tokens[key], with_special_tokens[key]):
                    self.assertEqual(len(i_no), len(i_with) - simple_num_special_tokens_to_add)
@slow
def test_markuplm_truncation_integration_test(self):
    """Integration check that ``truncation=True`` honors explicit and model max lengths."""
    nodes, xpaths = self.get_nodes_and_xpaths()
    tokenizer = MarkupLMTokenizer.from_pretrained("microsoft/markuplm-base", model_max_length=512)
    for i in range(12, 512):
        new_encoded_inputs = tokenizer.encode(nodes, xpaths=xpaths, max_length=i, truncation=True)
        # Ensure that the input IDs are less than the max length defined.
        self.assertLessEqual(len(new_encoded_inputs), i)
    # With no explicit max_length, model_max_length must cap the output.
    tokenizer.model_max_length = 20
    new_encoded_inputs = tokenizer.encode(nodes, xpaths=xpaths, truncation=True)
    # NOTE(review): the two calls above/below are identical, so this equality
    # only verifies determinism — presumably one of them was meant to differ
    # (e.g. a dropped argument); confirm the intended contrast upstream.
    dropped_encoded_inputs = tokenizer.encode(nodes, xpaths=xpaths, truncation=True)
    # Ensure that the input IDs are still truncated when no max_length is specified
    self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs)
    self.assertLessEqual(len(new_encoded_inputs), 20)
def test_sequence_ids(self):
    """``sequence_ids`` must tag sequence 0 / sequence 1 tokens (fast tokenizers only)."""
    for tokenizer in self.get_tokenizers():
        if not tokenizer.is_fast:
            continue
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            seq_0 = "Test this method."
            seq_1 = ["With", "these", "inputs."]
            xpaths = ["html/body"] * len(seq_1)
            # We want sequence 0 and sequence 1 tagged respectively with 0 and 1
            # token_ids (regardless of whether the model uses token type ids);
            # the QA pipeline relies on this assumption among other places.
            single = tokenizer(seq_0.split(), xpaths=xpaths)
            self.assertIn(0, single.sequence_ids())
            pair = tokenizer(seq_0, seq_1, xpaths=xpaths)
            self.assertIn(0, pair.sequence_ids())
            self.assertIn(1, pair.sequence_ids())
            if tokenizer.num_special_tokens_to_add(pair=True):
                self.assertIn(None, pair.sequence_ids())
def test_special_tokens_initialization(self):
    """Tokens passed via ``additional_special_tokens`` must encode to their own id."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
            extra = [AddedToken("<special>", lstrip=True)]
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                pretrained_name, additional_special_tokens=extra, **kwargs
            )
            nodes = "Hey this is a <special> token".split()
            xpaths = ["html/body"] * len(nodes)
            encoded = tokenizer_r.encode(nodes, xpaths=xpaths)
            # The id of "<special>" encoded on its own must appear in the full encoding.
            special_token_id = tokenizer_r.encode(["<special>"], xpaths=["html/body"], add_special_tokens=False)[0]
            self.assertIn(special_token_id, encoded)
def test_training_new_tokenizer(self):
    """``train_new_from_iterator`` must yield a working tokenizer with identical settings."""
    # This feature only exists for fast tokenizers
    if not self.test_rust_tokenizer:
        self.skipTest(reason="test_rust_tokenizer is set to False")
    tokenizer = self.get_tokenizer()
    new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100)
    # Test we can use the new tokenizer with something not seen during training
    text = [["this", "is", "the"], ["how", "are", "you"]]
    xpaths = [["html/body"] * 3, ["html/body"] * 3]
    inputs = new_tokenizer(text, xpaths=xpaths)
    self.assertEqual(len(inputs["input_ids"]), 2)
    decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
    expected_result = (  # original expected result "this is the" seems contradicts to FacebookAI/roberta-based tokenizer
        "thisisthe"
    )
    if tokenizer.backend_tokenizer.normalizer is not None:
        expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
    self.assertEqual(expected_result, decoded_input)
    # We check that the parameters of the tokenizer remained the same
    # Check we have the same number of added_tokens for both pair and non-pair inputs.
    self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False))
    self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True))
    # Check we have the correct max_length for both pair and non-pair inputs.
    self.assertEqual(tokenizer.max_len_single_sentence, new_tokenizer.max_len_single_sentence)
    self.assertEqual(tokenizer.max_len_sentences_pair, new_tokenizer.max_len_sentences_pair)
    # Assert the set of special tokens match as we didn't ask to change them
    self.assertSequenceEqual(
        tokenizer.all_special_tokens,
        new_tokenizer.all_special_tokens,
    )
    self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map)
def test_training_new_tokenizer_with_special_tokens_change(self):
    """``train_new_from_iterator`` must honor a ``special_tokens_map`` remapping.

    Trains a new fast tokenizer while renaming every special token, then checks
    the renamed tokens (content, ids, AddedToken parameters) and that the new
    tokenizer still encodes unseen input.
    """
    # This feature only exists for fast tokenizers
    if not self.test_rust_tokenizer:
        self.skipTest(reason="test_rust_tokenizer is set to False")
    tokenizer = self.get_tokenizer()
    # Test with a special tokens map
    class_signature = inspect.signature(tokenizer.__class__)
    if "cls_token" in class_signature.parameters:
        new_tokenizer = tokenizer.train_new_from_iterator(
            SMALL_TRAINING_CORPUS, 100, special_tokens_map={tokenizer.cls_token: "<cls>"}
        )
        cls_id = new_tokenizer.get_vocab()["<cls>"]
        self.assertEqual(new_tokenizer.cls_token, "<cls>")
        self.assertEqual(new_tokenizer.cls_token_id, cls_id)
    # Create a new mapping from the special tokens defined in the original tokenizer,
    # renaming each token by appending "a" to its text.
    special_tokens_list = PreTrainedTokenizerBase.SPECIAL_TOKENS_ATTRIBUTES.copy()
    special_tokens_map = {}
    for token in special_tokens_list:
        # Get the private one to avoid unnecessary warnings.
        if getattr(tokenizer, token) is not None:
            special_token = getattr(tokenizer, token)
            special_tokens_map[special_token] = f"{special_token}a"
    # Train new tokenizer
    new_tokenizer = tokenizer.train_new_from_iterator(
        SMALL_TRAINING_CORPUS, 100, special_tokens_map=special_tokens_map
    )
    # Check the changes: every remapped attribute must expose the new text and id.
    for token in special_tokens_list:
        # Get the private one to avoid unnecessary warnings.
        if getattr(tokenizer, token) is None:
            continue
        special_token = getattr(tokenizer, token)
        if special_token in special_tokens_map:
            new_special_token = getattr(new_tokenizer, token)
            self.assertEqual(special_tokens_map[special_token], new_special_token)
            new_id = new_tokenizer.get_vocab()[new_special_token]
            self.assertEqual(getattr(new_tokenizer, f"{token}_id"), new_id)
    # Check if the AddedToken / string format has been kept
    for special_token in tokenizer.all_special_tokens:
        if isinstance(special_token, AddedToken) and special_token.content not in special_tokens_map:
            # The special token must appear identically in the list of the new tokenizer.
            self.assertTrue(
                special_token in new_tokenizer.all_special_tokens,
                f"'{special_token}' should be in {new_tokenizer.all_special_tokens}",
            )
        elif isinstance(special_token, AddedToken):
            # The special token must appear in the list of the new tokenizer as an object of type AddedToken with
            # the same parameters as the old AddedToken except the content that the user has requested to change.
            special_token_str = special_token.content
            new_special_token_str = special_tokens_map[special_token_str]
            find = False
            for candidate in new_tokenizer.all_special_tokens:
                if (
                    isinstance(candidate, AddedToken)
                    and candidate.content == new_special_token_str
                    and candidate.lstrip == special_token.lstrip
                    and candidate.rstrip == special_token.rstrip
                    and candidate.normalized == special_token.normalized
                    and candidate.single_word == special_token.single_word
                ):
                    find = True
                    break
            self.assertTrue(
                find,
                f"'{new_special_token_str}' doesn't appear in the list "
                f"'{new_tokenizer.all_special_tokens}' as an AddedToken with the same parameters as "
                f"'{special_token}' in the list {tokenizer.all_special_tokens}",
            )
        elif special_token not in special_tokens_map:
            # The special token must appear identically in the list of the new tokenizer.
            self.assertTrue(
                special_token in new_tokenizer.all_special_tokens,
                f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}",
            )
        else:
            # The special token must appear in the list of the new tokenizer as an object of type string.
            self.assertTrue(special_tokens_map[special_token] in new_tokenizer.all_special_tokens)
    # Test we can use the new tokenizer with something not seen during training
    nodes = [["this", "is"], ["hello", "🤗"]]
    xpaths = [["html/body"] * 2, ["html/body"] * 2]
    inputs = new_tokenizer(nodes, xpaths=xpaths)
    self.assertEqual(len(inputs["input_ids"]), 2)
    decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
    expected_result = "thisis"  # same as line 1399
    if tokenizer.backend_tokenizer.normalizer is not None:
        expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result)
    self.assertEqual(expected_result, decoded_input)
def test_batch_encode_dynamic_overflowing(self):
    """
    When calling batch_encode with multiple sequences, it can return different number of
    overflowing encoding for each sequence:
    [
      Sequence 1: [Encoding 1, Encoding 2],
      Sequence 2: [Encoding 1],
      Sequence 3: [Encoding 1, Encoding 2, ... Encoding N]
    ]
    This needs to be padded so that it can represented as a tensor
    """
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
        with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"):
            returned_tensor = "pt"
            # Single example
            nodes, xpaths = self.get_nodes_and_xpaths()
            tokens = tokenizer.encode_plus(
                nodes,
                xpaths=xpaths,
                max_length=1,
                padding=True,
                truncation=True,
                return_tensors=returned_tensor,
                return_overflowing_tokens=True,
            )
            # Non-xpath tensors are rank 2; xpath tensors are rank 3 (they carry
            # an extra trailing dimension — presumably the xpath tag/subscript
            # depth; confirm against the tokenizer's xpath encoding).
            for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
                if "xpath" not in key:
                    self.assertEqual(len(tokens[key].shape), 2)
                else:
                    self.assertEqual(len(tokens[key].shape), 3)
            # Batch of examples
            # For these 2 examples, 3 training examples will be created
            nodes, xpaths = self.get_nodes_and_xpaths_batch()
            tokens = tokenizer.batch_encode_plus(
                nodes,
                xpaths=xpaths,
                max_length=6,
                padding=True,
                truncation="only_first",
                return_tensors=returned_tensor,
                return_overflowing_tokens=True,
            )
            # The sequence dimension must equal max_length (6); for xpath
            # tensors it is the second-to-last dimension.
            for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()):
                if "xpath" not in key:
                    self.assertEqual(len(tokens[key].shape), 2)
                    self.assertEqual(tokens[key].shape[-1], 6)
                else:
                    self.assertEqual(len(tokens[key].shape), 3)
                    self.assertEqual(tokens[key].shape[-2], 6)
# Disabled until the very extensive common alignment test is rewritten for this tokenizer.
@unittest.skip(reason="TO DO: overwrite this very extensive test.")
def test_alignment_methods(self):
    pass
def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
    """Build a (nodes, xpaths, ids) triple that round-trips through *tokenizer*.

    Picks vocabulary entries whose decoded form is plain ASCII letters/spaces
    and which encode back to exactly their own id, then decodes them into a
    node list with matching dummy xpaths.
    """
    candidates = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
    candidates = [t for t in candidates if re.match(r"^[ a-zA-Z]+$", t[1])]
    # Keep only tokens that re-encode to exactly their own single id.
    candidates = [
        t
        for t in candidates
        if [t[0]] == tokenizer.encode(t[1].split(" "), xpaths=len(t[1]) * ["html/body"], add_special_tokens=False)
    ]
    if max_length is not None and len(candidates) > max_length:
        candidates = candidates[:max_length]
    if min_length is not None and 0 < len(candidates) < min_length:
        # Duplicate the pool until it reaches the minimum size.
        while len(candidates) < min_length:
            candidates = candidates + candidates
    ids = [t[0] for t in candidates]
    # Ensure consistency: decode the ids into text, then re-encode below.
    output_txt = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
    # an extra blank would cause inconsistency: ["a","b",] & "a b"
    if with_prefix_space:
        output_txt = " " + output_txt
    nodes = output_txt.split(" ")
    xpaths = ["html/body"] * len(nodes)
    output_ids = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False)
    return nodes, xpaths, output_ids
@unittest.skip(reason="This test is failing for fast")
def test_maximum_encoding_length_pair_input(self):
    """Exercise truncation/overflow handling for (question, nodes) pair inputs.

    Covers the truncation strategies (`True`, `longest_first`, `only_first`,
    `only_second`), the ordering of overflowing tokens, and the matching
    `xpath_tags_seq` alignment for both the slow (Python) and fast
    (Rust-backed) MarkupLM tokenizers.
    """
    # slow part fixed, fast part not
    tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            # Build a sequence from our model's vocabulary
            stride = 2
            seq_0, xpaths_0, ids = self.get_clean_sequence(tokenizer, max_length=20)
            question_0 = " ".join(map(str, seq_0))
            if len(ids) <= 2 + stride:
                # NOTE(review): seq_0 is a list of nodes here, so `seq_0 + " "`
                # would raise TypeError if this branch were ever taken — it looks
                # like dead code inherited from the text-only common test; confirm.
                seq_0 = (seq_0 + " ") * (2 + stride)
                ids = None
            seq0_tokens = tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)
            self.assertGreater(len(seq0_tokens["input_ids"]), 2 + stride)
            question_1 = "This is another sentence to be encoded."
            seq_1 = ["hello", "world"]
            xpaths_1 = ["html/body" for i in range(len(seq_1))]
            seq1_tokens = tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)
            if abs(len(seq0_tokens["input_ids"]) - len(seq1_tokens["input_ids"])) <= 2:
                # Make the two sequences clearly different in length so the
                # "longest"-based expectations below are unambiguous.
                seq1_tokens_input_ids = seq1_tokens["input_ids"] + seq1_tokens["input_ids"]
                seq_1 = tokenizer.decode(seq1_tokens_input_ids, clean_up_tokenization_spaces=False)
                seq_1 = seq_1.split(" ")
                xpaths_1 = ["html/body" for i in range(len(seq_1))]
                seq1_tokens = tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)
            self.assertGreater(len(seq1_tokens["input_ids"]), 2 + stride)
            smallest = (
                seq1_tokens["input_ids"]
                if len(seq0_tokens["input_ids"]) > len(seq1_tokens["input_ids"])
                else seq0_tokens["input_ids"]
            )
            # We are not using the special tokens - a bit too hard to test all the tokenizers with this
            # TODO try this again later
            sequence = tokenizer(question_0, seq_1, xpaths=xpaths_1, add_special_tokens=False)
            # Test with max model input length
            model_max_length = tokenizer.model_max_length
            self.assertEqual(model_max_length, 100)
            seq_2 = seq_0 * model_max_length
            question_2 = " ".join(map(str, seq_2))
            xpaths_2 = xpaths_0 * model_max_length
            # assertgreater -> assertgreaterequal
            self.assertGreaterEqual(len(seq_2), model_max_length)
            sequence1 = tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)
            total_length1 = len(sequence1["input_ids"])
            sequence2 = tokenizer(question_2, seq_1, xpaths=xpaths_1, add_special_tokens=False)
            total_length2 = len(sequence2["input_ids"])
            self.assertLess(total_length1, model_max_length, "Issue with the testing sequence, please update it.")
            self.assertGreater(
                total_length2, model_max_length, "Issue with the testing sequence, please update it."
            )
            # Simple
            padding_strategies = (
                [False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False]
            )
            for padding_state in padding_strategies:
                with self.subTest(f"{tokenizer.__class__.__name__} Padding: {padding_state}"):
                    for truncation_state in [True, "longest_first", "only_first"]:
                        with self.subTest(f"{tokenizer.__class__.__name__} Truncation: {truncation_state}"):
                            # Single (non-batched) pair input.
                            output = tokenizer(
                                question_2,
                                seq_1,
                                xpaths=xpaths_1,
                                padding=padding_state,
                                truncation=truncation_state,
                            )
                            self.assertEqual(len(output["input_ids"]), model_max_length)
                            self.assertEqual(len(output["xpath_tags_seq"]), model_max_length)
                            self.assertEqual(len(output["xpath_subs_seq"]), model_max_length)
                            # Batched pair input (batch of one).
                            output = tokenizer(
                                [question_2],
                                [seq_1],
                                xpaths=[xpaths_1],
                                padding=padding_state,
                                truncation=truncation_state,
                            )
                            self.assertEqual(len(output["input_ids"][0]), model_max_length)
                            self.assertEqual(len(output["xpath_tags_seq"][0]), model_max_length)
                            self.assertEqual(len(output["xpath_subs_seq"][0]), model_max_length)
                    # Simple
                    output = tokenizer(
                        question_1, seq_2, xpaths=xpaths_2, padding=padding_state, truncation="only_second"
                    )
                    self.assertEqual(len(output["input_ids"]), model_max_length)
                    self.assertEqual(len(output["xpath_tags_seq"]), model_max_length)
                    self.assertEqual(len(output["xpath_subs_seq"]), model_max_length)
                    output = tokenizer(
                        [question_1], [seq_2], xpaths=[xpaths_2], padding=padding_state, truncation="only_second"
                    )
                    self.assertEqual(len(output["input_ids"][0]), model_max_length)
                    self.assertEqual(len(output["xpath_tags_seq"][0]), model_max_length)
                    self.assertEqual(len(output["xpath_subs_seq"][0]), model_max_length)
                    # Simple with no truncation
                    # Reset warnings
                    tokenizer.deprecation_warnings = {}
                    with self.assertLogs("transformers", level="WARNING") as cm:
                        output = tokenizer(
                            question_1, seq_2, xpaths=xpaths_2, padding=padding_state, truncation=False
                        )
                    self.assertNotEqual(len(output["input_ids"]), model_max_length)
                    self.assertNotEqual(len(output["xpath_tags_seq"]), model_max_length)
                    self.assertNotEqual(len(output["xpath_subs_seq"]), model_max_length)
                    # Exactly one too-long warning must have been emitted.
                    self.assertEqual(len(cm.records), 1)
                    self.assertTrue(
                        cm.records[0].message.startswith(
                            "Token indices sequence length is longer than the specified maximum sequence length"
                            " for this model"
                        )
                    )
                    tokenizer.deprecation_warnings = {}
                    with self.assertLogs("transformers", level="WARNING") as cm:
                        output = tokenizer(
                            [question_1], [seq_2], xpaths=[xpaths_2], padding=padding_state, truncation=False
                        )
                    self.assertNotEqual(len(output["input_ids"][0]), model_max_length)
                    self.assertNotEqual(len(output["xpath_tags_seq"][0]), model_max_length)
                    self.assertNotEqual(len(output["xpath_subs_seq"][0]), model_max_length)
                    self.assertEqual(len(cm.records), 1)
                    self.assertTrue(
                        cm.records[0].message.startswith(
                            "Token indices sequence length is longer than the specified maximum sequence length"
                            " for this model"
                        )
                    )
            # Check the order of Sequence of input ids, overflowing tokens and xpath_tags_seq sequence with truncation
            truncated_first_sequence = (
                tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)["input_ids"][:-2]
                + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)["input_ids"]
            )
            truncated_second_sequence = (
                tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)["input_ids"]
                + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)["input_ids"][:-2]
            )
            # NOTE(review): `len(seq0_tokens)`/`len(seq1_tokens)` measure the
            # number of keys in the encoding objects, not token counts —
            # presumably len(...["input_ids"]) was intended; confirm before
            # tightening these "longest" expectations.
            truncated_longest_sequence = (
                truncated_first_sequence if len(seq0_tokens) > len(seq1_tokens) else truncated_second_sequence
            )
            overflow_first_sequence = (
                tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)["input_ids"][-(2 + stride) :]
                + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)["input_ids"]
            )
            overflow_second_sequence = (
                tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)["input_ids"]
                + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)["input_ids"][-(2 + stride) :]
            )
            overflow_longest_sequence = (
                overflow_first_sequence if len(seq0_tokens) > len(seq1_tokens) else overflow_second_sequence
            )
            # [[5] * 50] is the expected xpath-tag row for question tokens —
            # presumably the tokenizer's pad/default xpath row; verify against
            # the tokenizer's xpath config.
            xpath_tags_seq_first = [[5] * 50] * (
                len(tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)["input_ids"]) - 2
            )
            xpath_tags_seq_first_sequence = (
                xpath_tags_seq_first
                + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)["xpath_tags_seq"]
            )
            overflowing_token_xpath_tags_seq_first_sequence_slow = [[5] * 50] * (2 + stride)
            overflowing_token_xpath_tags_seq_first_sequence_fast = [[5] * 50] * (2 + stride) + tokenizer(
                seq_1, xpaths=xpaths_1, add_special_tokens=False
            )["xpath_tags_seq"]
            xpath_tags_seq_second = [[5] * 50] * len(
                tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)["input_ids"]
            )
            xpath_tags_seq_second_sequence = (
                xpath_tags_seq_second
                + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)["xpath_tags_seq"][:-2]
            )
            overflowing_token_xpath_tags_seq_second_sequence_slow = tokenizer(
                seq_1, xpaths=xpaths_1, add_special_tokens=False
            )["xpath_tags_seq"][-(2 + stride) :]
            overflowing_token_xpath_tags_seq_second_sequence_fast = [[5] * 50] * len(
                tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)["input_ids"]
            ) + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)["xpath_tags_seq"][-(2 + stride) :]
            xpath_tags_seq_longest_sequence = (
                xpath_tags_seq_first_sequence
                if len(seq0_tokens) > len(seq1_tokens)
                else xpath_tags_seq_second_sequence
            )
            overflowing_token_xpath_tags_seq_longest_sequence_fast = (
                overflowing_token_xpath_tags_seq_first_sequence_fast
                if len(seq0_tokens) > len(seq1_tokens)
                else overflowing_token_xpath_tags_seq_second_sequence_fast
            )
            # Overflowing tokens are handled quite differently in slow and fast tokenizers
            if isinstance(tokenizer, MarkupLMTokenizerFast):
                information = tokenizer(
                    question_0,
                    seq_1,
                    xpaths=xpaths_1,
                    max_length=len(sequence["input_ids"]) - 2,
                    add_special_tokens=False,
                    stride=stride,
                    truncation="longest_first",
                    return_overflowing_tokens=True,
                    # add_prefix_space=False,
                )
                # Fast tokenizers expose overflow as a second batch entry.
                truncated_sequence = information["input_ids"][0]
                overflowing_tokens = information["input_ids"][1]
                xpath_tags_seq = information["xpath_tags_seq"][0]
                overflowing_xpath_tags_seq = information["xpath_tags_seq"][1]
                self.assertEqual(len(information["input_ids"]), 2)
                self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
                self.assertEqual(truncated_sequence, truncated_longest_sequence)
                self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest))
                self.assertEqual(overflowing_tokens, overflow_longest_sequence)
                self.assertEqual(xpath_tags_seq, xpath_tags_seq_longest_sequence)
                self.assertEqual(len(overflowing_xpath_tags_seq), 2 + stride + len(smallest))
                self.assertEqual(
                    overflowing_xpath_tags_seq, overflowing_token_xpath_tags_seq_longest_sequence_fast
                )
            else:
                # No overflowing tokens when using 'longest' in python tokenizers
                with self.assertRaises(ValueError) as context:
                    information = tokenizer(
                        question_0,
                        seq_1,
                        xpaths=xpaths_1,
                        max_length=len(sequence["input_ids"]) - 2,
                        add_special_tokens=False,
                        stride=stride,
                        truncation="longest_first",
                        return_overflowing_tokens=True,
                        # add_prefix_space=False,
                    )
                self.assertTrue(
                    context.exception.args[0].startswith(
                        "Not possible to return overflowing tokens for pair of sequences with the "
                        "`longest_first`. Please select another truncation strategy than `longest_first`, "
                        "for instance `only_second` or `only_first`."
                    )
                )
            # Overflowing tokens are handled quite differently in slow and fast tokenizers
            # Same expectations as above, with truncation=True (alias of longest_first).
            if isinstance(tokenizer, MarkupLMTokenizerFast):
                information = tokenizer(
                    question_0,
                    seq_1,
                    xpaths=xpaths_1,
                    max_length=len(sequence["input_ids"]) - 2,
                    add_special_tokens=False,
                    stride=stride,
                    truncation=True,
                    return_overflowing_tokens=True,
                )
                truncated_sequence = information["input_ids"][0]
                overflowing_tokens = information["input_ids"][1]
                xpath_tags_seq = information["xpath_tags_seq"][0]
                overflowing_xpath_tags_seq = information["xpath_tags_seq"][1]
                self.assertEqual(len(information["input_ids"]), 2)
                self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
                self.assertEqual(truncated_sequence, truncated_longest_sequence)
                self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest))
                self.assertEqual(overflowing_tokens, overflow_longest_sequence)
                self.assertEqual(xpath_tags_seq, xpath_tags_seq_longest_sequence)
                self.assertEqual(
                    overflowing_xpath_tags_seq, overflowing_token_xpath_tags_seq_longest_sequence_fast
                )
            else:
                # No overflowing tokens when using 'longest' in python tokenizers
                with self.assertRaises(ValueError) as context:
                    information = tokenizer(
                        question_0,
                        seq_1,
                        xpaths=xpaths_1,
                        max_length=len(sequence["input_ids"]) - 2,
                        add_special_tokens=False,
                        stride=stride,
                        truncation=True,
                        return_overflowing_tokens=True,
                    )
                self.assertTrue(
                    context.exception.args[0].startswith(
                        "Not possible to return overflowing tokens for pair of sequences with the "
                        "`longest_first`. Please select another truncation strategy than `longest_first`, "
                        "for instance `only_second` or `only_first`."
                    )
                )
            information_first_truncated = tokenizer(
                question_0,
                seq_1,
                xpaths=xpaths_1,
                max_length=len(sequence["input_ids"]) - 2,
                add_special_tokens=False,
                stride=stride,
                truncation="only_first",
                return_overflowing_tokens=True,
            )
            # Overflowing tokens are handled quite differently in slow and fast tokenizers
            if isinstance(tokenizer, MarkupLMTokenizerFast):
                truncated_sequence = information_first_truncated["input_ids"][0]
                overflowing_tokens = information_first_truncated["input_ids"][1]
                xpath_tags_seq = information_first_truncated["xpath_tags_seq"][0]
                overflowing_xpath_tags_seq = information_first_truncated["xpath_tags_seq"][1]
                self.assertEqual(len(information_first_truncated["input_ids"]), 2)
                self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
                self.assertEqual(truncated_sequence, truncated_first_sequence)
                self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq1_tokens["input_ids"]))
                self.assertEqual(overflowing_tokens, overflow_first_sequence)
                self.assertEqual(xpath_tags_seq, xpath_tags_seq_first_sequence)
                # NOTE(review): this appears to be the assertion that fails for
                # the fast tokenizer (hence the class-level skip); confirm the
                # expected overflow xpath layout before un-skipping.
                self.assertEqual(overflowing_xpath_tags_seq, overflowing_token_xpath_tags_seq_first_sequence_fast)
            else:
                # Slow tokenizers expose overflow under dedicated keys.
                truncated_sequence = information_first_truncated["input_ids"]
                overflowing_tokens = information_first_truncated["overflowing_tokens"]
                overflowing_xpath_tags_seq = information_first_truncated["overflowing_xpath_tags_seq"]
                xpath_tags_seq = information_first_truncated["xpath_tags_seq"]
                self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
                self.assertEqual(truncated_sequence, truncated_first_sequence)
                self.assertEqual(len(overflowing_tokens), 2 + stride)
                self.assertEqual(overflowing_tokens, seq0_tokens["input_ids"][-(2 + stride) :])
                self.assertEqual(xpath_tags_seq, xpath_tags_seq_first_sequence)
                self.assertEqual(overflowing_xpath_tags_seq, overflowing_token_xpath_tags_seq_first_sequence_slow)
            information_second_truncated = tokenizer(
                question_0,
                seq_1,
                xpaths=xpaths_1,
                max_length=len(sequence["input_ids"]) - 2,
                add_special_tokens=False,
                stride=stride,
                truncation="only_second",
                return_overflowing_tokens=True,
                # add_prefix_space=False,
            )
            # Overflowing tokens are handled quite differently in slow and fast tokenizers
            if isinstance(tokenizer, MarkupLMTokenizerFast):
                truncated_sequence = information_second_truncated["input_ids"][0]
                overflowing_tokens = information_second_truncated["input_ids"][1]
                xpath_tags_seq = information_second_truncated["xpath_tags_seq"][0]
                overflowing_xpath_tags_seq = information_second_truncated["xpath_tags_seq"][1]
                self.assertEqual(len(information_second_truncated["input_ids"]), 2)
                self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
                self.assertEqual(truncated_sequence, truncated_second_sequence)
                self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq0_tokens["input_ids"]))
                self.assertEqual(overflowing_tokens, overflow_second_sequence)
                self.assertEqual(xpath_tags_seq, xpath_tags_seq_second_sequence)
                self.assertEqual(overflowing_xpath_tags_seq, overflowing_token_xpath_tags_seq_second_sequence_fast)
            else:
                truncated_sequence = information_second_truncated["input_ids"]
                overflowing_tokens = information_second_truncated["overflowing_tokens"]
                xpath_tags_seq = information_second_truncated["xpath_tags_seq"]
                overflowing_xpath_tags_seq = information_second_truncated["overflowing_xpath_tags_seq"]
                self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2)
                self.assertEqual(truncated_sequence, truncated_second_sequence)
                self.assertEqual(len(overflowing_tokens), 2 + stride)
                self.assertEqual(overflowing_tokens, seq1_tokens["input_ids"][-(2 + stride) :])
                self.assertEqual(xpath_tags_seq, xpath_tags_seq_second_sequence)
                self.assertEqual(overflowing_xpath_tags_seq, overflowing_token_xpath_tags_seq_second_sequence_slow)
def test_maximum_encoding_length_single_input(self):
    """Exercise truncation/overflow handling for single (nodes, xpaths) inputs.

    Verifies that every truncation strategy clamps `input_ids`,
    `xpath_tags_seq` and `xpath_subs_seq` to `model_max_length`, that
    `truncation=False` only warns, and that overflow tokens/xpaths are
    returned in the expected order by both slow and fast tokenizers.
    """
    tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            seq_0, xpaths_0, ids = self.get_clean_sequence(tokenizer, max_length=20)
            sequence = tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)
            total_length = len(sequence["input_ids"])
            self.assertGreater(total_length, 4, "Issue with the testing sequence, please update it it's too short")
            # Test with max model input length
            model_max_length = tokenizer.model_max_length
            self.assertEqual(model_max_length, 100)
            # Repeat the clean sequence far past the model limit.
            seq_1 = seq_0 * model_max_length
            xpaths_1 = xpaths_0 * model_max_length
            sequence1 = tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)
            total_length1 = len(sequence1["input_ids"])
            self.assertGreater(
                total_length1, model_max_length, "Issue with the testing sequence, please update it it's too short"
            )
            # Simple
            padding_strategies = (
                [False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False]
            )
            for padding_state in padding_strategies:
                with self.subTest(f"Padding: {padding_state}"):
                    for truncation_state in [True, "longest_first", "only_first"]:
                        with self.subTest(f"Truncation: {truncation_state}"):
                            # Non-batched input.
                            output = tokenizer(
                                seq_1,
                                xpaths=xpaths_1,
                                padding=padding_state,
                                truncation=truncation_state,
                            )
                            self.assertEqual(len(output["input_ids"]), model_max_length)
                            self.assertEqual(len(output["xpath_tags_seq"]), model_max_length)
                            self.assertEqual(len(output["xpath_subs_seq"]), model_max_length)
                            # Batched input (batch of one).
                            output = tokenizer(
                                [seq_1],
                                xpaths=[xpaths_1],
                                padding=padding_state,
                                truncation=truncation_state,
                            )
                            self.assertEqual(len(output["input_ids"][0]), model_max_length)
                            self.assertEqual(len(output["xpath_tags_seq"][0]), model_max_length)
                            self.assertEqual(len(output["xpath_subs_seq"][0]), model_max_length)
                    # Simple with no truncation
                    # Reset warnings
                    tokenizer.deprecation_warnings = {}
                    with self.assertLogs("transformers", level="WARNING") as cm:
                        output = tokenizer(seq_1, xpaths=xpaths_1, padding=padding_state, truncation=False)
                    self.assertNotEqual(len(output["input_ids"]), model_max_length)
                    self.assertNotEqual(len(output["xpath_tags_seq"]), model_max_length)
                    self.assertNotEqual(len(output["xpath_subs_seq"]), model_max_length)
                    # Exactly one too-long warning must have been emitted.
                    self.assertEqual(len(cm.records), 1)
                    self.assertTrue(
                        cm.records[0].message.startswith(
                            "Token indices sequence length is longer than the specified maximum sequence length"
                            " for this model"
                        )
                    )
                    tokenizer.deprecation_warnings = {}
                    with self.assertLogs("transformers", level="WARNING") as cm:
                        output = tokenizer([seq_1], xpaths=[xpaths_1], padding=padding_state, truncation=False)
                    self.assertNotEqual(len(output["input_ids"][0]), model_max_length)
                    self.assertNotEqual(len(output["xpath_tags_seq"][0]), model_max_length)
                    self.assertNotEqual(len(output["xpath_subs_seq"][0]), model_max_length)
                    self.assertEqual(len(cm.records), 1)
                    self.assertTrue(
                        cm.records[0].message.startswith(
                            "Token indices sequence length is longer than the specified maximum sequence length"
                            " for this model"
                        )
                    )
            # Check the order of Sequence of input ids, overflowing tokens, xpath_tags_seq and xpath_subs_seq sequence with truncation
            stride = 2
            information = tokenizer(
                seq_0,
                xpaths=xpaths_0,
                max_length=total_length - 2,
                add_special_tokens=False,
                stride=stride,
                truncation=True,
                return_overflowing_tokens=True,
            )
            # Overflowing tokens are handled quite differently in slow and fast tokenizers
            if isinstance(tokenizer, MarkupLMTokenizerFast):
                # Fast tokenizers return overflow as a second batch entry.
                truncated_sequence = information["input_ids"][0]
                overflowing_tokens = information["input_ids"][1]
                xpath_tags_seq = information["xpath_tags_seq"][0]
                overflowing_xpath_tags_seq = information["xpath_tags_seq"][1]
                self.assertEqual(len(information["input_ids"]), 2)
                self.assertEqual(len(truncated_sequence), total_length - 2)
                self.assertEqual(truncated_sequence, sequence["input_ids"][:-2])
                self.assertEqual(len(overflowing_tokens), 2 + stride)
                self.assertEqual(overflowing_tokens, sequence["input_ids"][-(2 + stride) :])
                self.assertEqual(xpath_tags_seq, sequence["xpath_tags_seq"][:-2])
                self.assertEqual(overflowing_xpath_tags_seq, sequence["xpath_tags_seq"][-(2 + stride) :])
            else:
                # Slow tokenizers expose overflow under dedicated keys.
                truncated_sequence = information["input_ids"]
                overflowing_tokens = information["overflowing_tokens"]
                xpath_tags_seq = information["xpath_tags_seq"]
                overflowing_xpath_tags_seq = information["overflowing_xpath_tags_seq"]
                self.assertEqual(len(truncated_sequence), total_length - 2)
                self.assertEqual(truncated_sequence, sequence["input_ids"][:-2])
                self.assertEqual(len(overflowing_tokens), 2 + stride)
                self.assertEqual(overflowing_tokens, sequence["input_ids"][-(2 + stride) :])
                self.assertEqual(xpath_tags_seq, sequence["xpath_tags_seq"][:-2])
                self.assertEqual(overflowing_xpath_tags_seq, sequence["xpath_tags_seq"][-(2 + stride) :])
@unittest.skip(reason="MarkupLM tokenizer requires xpaths besides sequences.")
def test_pretokenized_inputs(self):
    # Skipped: the common-suite version calls the tokenizer without the
    # mandatory `xpaths` argument.
    pass
@unittest.skip(reason="MarkupLM tokenizer always expects pretokenized inputs.")
def test_compare_pretokenized_inputs(self):
    # Skipped: there is no non-pretokenized mode to compare against for
    # MarkupLM's node/xpath interface.
    pass
@unittest.skip(reason="MarkupLM fast tokenizer does not support prepare_for_model")
def test_compare_prepare_for_model(self):
    # Skipped: `prepare_for_model` is unavailable on the fast tokenizer, so a
    # slow-vs-fast comparison is not possible.
    pass
@slow
def test_only_label_first_subword(self):
    """Check label propagation to subwords for both slow and fast tokenizers.

    By default only the first subword of a node carries its label (other
    subwords get -100); with ``only_label_first_subword=False`` every subword
    of the node receives the label.
    """
    nodes = ["hello", "niels"]
    xpaths = ["/html/body/div/li[1]/div/span" for _ in nodes]
    node_labels = [0, 1]

    # Same expectations for the slow (Python) and fast (Rust-backed) tokenizer.
    for tokenizer_class in (MarkupLMTokenizer, MarkupLMTokenizerFast):
        # Default behavior: only the first subword is labeled.
        tok = tokenizer_class.from_pretrained("microsoft/markuplm-base")
        encoding = tok(nodes, xpaths=xpaths, node_labels=node_labels)
        self.assertListEqual(encoding.labels, [-100, 0, 1, -100, -100])
        # only_label_first_subword=False: every subword gets the node's label.
        tok = tokenizer_class.from_pretrained("microsoft/markuplm-base", only_label_first_subword=False)
        encoding = tok(nodes, xpaths=xpaths, node_labels=node_labels)
        self.assertListEqual(encoding.labels, [-100, 0, 1, 1, -100])
def test_markuplm_integration_test(self):
tokenizer_p = MarkupLMTokenizer.from_pretrained("microsoft/markuplm-base")
tokenizer_r = MarkupLMTokenizerFast.from_pretrained("microsoft/markuplm-base")
# There are 3 cases:
# CASE 1: document image classification (training + inference), document image token classification (inference),
# in which case only nodes and normalized bounding xpaths are provided to the tokenizer
# CASE 2: document image token classification (training),
# in which case one also provides word labels to the tokenizer
# CASE 3: document image visual question answering (inference),
# in which case one also provides a question to the tokenizer
# We need to test all 3 cases both on batched and non-batched inputs.
# CASE 1: not batched
nodes, xpaths = self.get_nodes_and_xpaths()
expected_results = {'input_ids': [0, 42891, 8331, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'xpath_tags_seq': [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], 'xpath_subs_seq': [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # fmt: skip
encoding_p = tokenizer_p(nodes, xpaths=xpaths, padding="max_length", max_length=20)
encoding_r = tokenizer_r(nodes, xpaths=xpaths, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 1: batched
nodes, xpaths = self.get_nodes_and_xpaths_batch()
expected_results = {'input_ids': [[0, 42891, 232, 12364, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 42891, 127, 766, 16, 22401, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'xpath_tags_seq': [[[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 
25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]]], 'xpath_subs_seq': [[[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip
encoding_p = tokenizer_p(nodes, xpaths=xpaths, padding="max_length", max_length=20)
encoding_r = tokenizer_r(nodes, xpaths=xpaths, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 2: not batched
nodes, xpaths = self.get_nodes_and_xpaths()
node_labels = [1, 2, 3]
expected_results = {'input_ids': [0, 42891, 8331, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'xpath_tags_seq': [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], 'xpath_subs_seq': [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'labels': [-100, 1, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], 'attention_mask': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # fmt: skip
encoding_p = tokenizer_p(nodes, xpaths=xpaths, node_labels=node_labels, padding="max_length", max_length=20)
encoding_r = tokenizer_r(nodes, xpaths=xpaths, node_labels=node_labels, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 2: batched
nodes, xpaths = self.get_nodes_and_xpaths_batch()
node_labels = [[1, 2, 3], [2, 46, 17, 22, 3]]
expected_results = {'input_ids': [[0, 42891, 232, 12364, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 42891, 127, 766, 16, 22401, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'xpath_tags_seq': [[[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 
25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]]], 'xpath_subs_seq': [[[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'labels': [[-100, 1, -100, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], [-100, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]], 'attention_mask': [[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip
encoding_p = tokenizer_p(nodes, xpaths=xpaths, node_labels=node_labels, padding="max_length", max_length=20)
encoding_r = tokenizer_r(nodes, xpaths=xpaths, node_labels=node_labels, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 3: not batched
question, nodes, xpaths = self.get_question_nodes_and_xpaths()
expected_results = {'input_ids': [0, 12196, 18, 39, 766, 116, 2, 42891, 232, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'xpath_tags_seq': [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], 'xpath_subs_seq': [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} # fmt: skip
encoding_p = tokenizer_p(question, nodes, xpaths, padding="max_length", max_length=20)
encoding_r = tokenizer_r(question, nodes, xpaths, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
# CASE 3: batched
questions, nodes, xpaths = self.get_question_nodes_and_xpaths_batch()
expected_results = {'input_ids': [[0, 12196, 18, 39, 766, 116, 2, 42891, 232, 12364, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 9178, 16, 37, 373, 116, 2, 42891, 127, 766, 16, 22401, 2, 1, 1, 1, 1, 1, 1, 1]], 'xpath_tags_seq': [[[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 
120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 
216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 
216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]]], 'xpath_subs_seq': [[[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 1, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], 
[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [0, 0, 0, 2, 0, 0, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 
1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]]} # fmt: skip
encoding_p = tokenizer_p(questions, nodes, xpaths, padding="max_length", max_length=20)
encoding_r = tokenizer_r(questions, nodes, xpaths, padding="max_length", max_length=20)
self.assertDictEqual(dict(encoding_p), expected_results)
self.assertDictEqual(dict(encoding_r), expected_results)
@unittest.skip(reason="Doesn't support returning Numpy arrays")
def test_np_encode_plus_sent_to_model(self):
    # Common-test override: disabled because this tokenizer cannot return
    # NumPy arrays (see skip reason above), so the shared check is inapplicable.
    pass
@unittest.skip(reason="Chat is not supported")
def test_chat_template(self):
    # Common-test override: this tokenizer has no chat template support,
    # so the inherited chat-template test is skipped.
    pass
@unittest.skip(reason="The model tested fails `Hub -> Fast == Hub -> Slow`, nothing much we can do")
def test_added_tokens_serialization(self):
    # Common-test override: known fast/slow serialization mismatch for this
    # model (see skip reason); intentionally not asserted here.
    pass
@unittest.skip("Chat is not supported")
def test_chat_template_return_assistant_tokens_mask(self):
    # Common-test override: no chat support, so the assistant-token-mask
    # variant of the chat-template test is skipped as well.
    pass
@unittest.skip("Chat is not supported")
def test_chat_template_return_assistant_tokens_mask_truncated(self):
    # Common-test override: no chat support, so the truncated assistant-mask
    # variant is skipped too.
    pass
def test_empty_input_string(self):
    """Empty node/xpath inputs must still yield tensors with the right integer dtype.

    Exercises every available tensor framework (PyTorch+NumPy, MLX) against
    four input shapes: single example, question+example, batched examples,
    and batched question+examples.
    """
    tokenizer_return_type = []  # values to pass as return_tensors ("pt", "np", "mlx")
    output_tensor_type = []  # expected input_ids dtype, parallel to the list above
    if is_torch_available():
        import numpy as np
        import torch

        tokenizer_return_type.append("pt")
        output_tensor_type.append(torch.int64)
        tokenizer_return_type.append("np")
        output_tensor_type.append(np.int64)
    if is_mlx_available():
        import mlx.core as mx

        tokenizer_return_type.append("mlx")
        output_tensor_type.append(mx.int32)
    if len(tokenizer_return_type) == 0:
        # Nothing to test without at least one tensor framework installed.
        self.skipTest(reason="No expected framework from PT, or MLX found")
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            # Single empty example.
            nodes, xpaths = self.get_empty_nodes_and_xpaths()
            for return_type, target_type in zip(tokenizer_return_type, output_tensor_type):
                output = tokenizer(nodes, xpaths=xpaths, return_tensors=return_type)
                self.assertEqual(output.input_ids.dtype, target_type)
            # NOTE(review): `question` is fetched but never passed to the
            # tokenizer below, so this branch re-tests the nodes-only path.
            # Confirm whether `tokenizer(question, nodes, ...)` was intended.
            question, nodes, xpaths = self.get_empty_question_nodes_and_xpaths()
            for return_type, target_type in zip(tokenizer_return_type, output_tensor_type):
                output = tokenizer(nodes, xpaths=xpaths, return_tensors=return_type)
                self.assertEqual(output.input_ids.dtype, target_type)
            # Batched empty examples: padding=True is required so ragged inputs
            # can be stacked into a rectangular tensor.
            nodes, xpaths = self.get_empty_nodes_and_xpaths_batch()
            for return_type, target_type in zip(tokenizer_return_type, output_tensor_type):
                output = tokenizer(nodes, xpaths=xpaths, padding=True, return_tensors=return_type)
                self.assertEqual(output.input_ids.dtype, target_type)
            # NOTE(review): same unused `question` pattern as above.
            question, nodes, xpaths = self.get_empty_question_nodes_and_xpaths_batch()
            for return_type, target_type in zip(tokenizer_return_type, output_tensor_type):
                output = tokenizer(nodes, xpaths=xpaths, padding=True, return_tensors=return_type)
                self.assertEqual(output.input_ids.dtype, target_type)
|
MarkupLMTokenizationTest
|
python
|
django__django
|
django/db/models/lookups.py
|
{
"start": 24773,
"end": 26451
}
|
class ____(Lookup):
    """Base lookup for comparing an extracted year against a direct value.

    When the right-hand side is a literal year, the comparison is rewritten
    against the originating date/datetime column with precomputed bounds so
    that an index on that column can be used instead of an EXTRACT().
    """

    def year_lookup_bounds(self, connection, year):
        # Pick the backend helper matching the underlying field type; both
        # honor ISO-year extraction when the lhs is ExtractIsoYear.
        from django.db.models.functions import ExtractIsoYear

        iso_year = isinstance(self.lhs, ExtractIsoYear)
        output_field = self.lhs.lhs.output_field
        if isinstance(output_field, DateTimeField):
            bounds_for = connection.ops.year_lookup_bounds_for_datetime_field
        else:
            bounds_for = connection.ops.year_lookup_bounds_for_date_field
        return bounds_for(year, iso_year=iso_year)

    def as_sql(self, compiler, connection):
        # Only a direct rhs value lets us avoid the extract operation so
        # indexes can be used; otherwise fall back to the generic lookup.
        if not self.rhs_is_direct_value():
            return super().as_sql(compiler, connection)
        # Skip the extract part by comparing against the originating field,
        # that is self.lhs.lhs, with year bounds substituted for the value.
        lhs_sql, params = self.process_lhs(compiler, connection, self.lhs.lhs)
        rhs_sql, _ = self.process_rhs(compiler, connection)
        rhs_sql = self.get_direct_rhs_sql(connection, rhs_sql)
        lower, upper = self.year_lookup_bounds(connection, self.rhs)
        params = (*params, *self.get_bound_params(lower, upper))
        return f"{lhs_sql} {rhs_sql}", params

    def get_direct_rhs_sql(self, connection, rhs):
        # Fill the backend's operator template (e.g. "= %s") with the rhs SQL.
        template = connection.operators[self.lookup_name]
        return template % rhs

    def get_bound_params(self, start, finish):
        # Subclasses decide which of the two bounds participate in the SQL.
        raise NotImplementedError(
            "subclasses of YearLookup must provide a get_bound_params() method"
        )
|
YearLookup
|
python
|
pallets__jinja
|
tests/test_bytecode_cache.py
|
{
"start": 420,
"end": 644
}
|
class ____:
    def test_simple(self, env):
        """A cached template renders normally; missing templates still raise."""
        template = env.get_template("test.html")
        assert template.render().strip() == "BAR"
        with pytest.raises(TemplateNotFound):
            env.get_template("missing.html")
|
TestByteCodeCache
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.