language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ray-project__ray | python/ray/serve/_private/long_poll.py | {
"start": 1477,
"end": 1725
} | class ____(Enum):
def __repr__(self):
return f"{self.__class__.__name__}.{self.name}"
DEPLOYMENT_TARGETS = auto()
ROUTE_TABLE = auto()
GLOBAL_LOGGING_CONFIG = auto()
DEPLOYMENT_CONFIG = auto()
@dataclass
| LongPollNamespace |
python | keras-team__keras | keras/src/optimizers/optimizer.py | {
"start": 664,
"end": 870
} | class ____(BackendOptimizer, base_optimizer.BaseOptimizer):
pass
Optimizer.__doc__ = base_optimizer.BaseOptimizer.__doc__
base_optimizer_keyword_args = base_optimizer.base_optimizer_keyword_args
| Optimizer |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_logging_sink.py | {
"start": 12452,
"end": 14811
} | class ____(GoogleCloudBaseOperator):
"""
Lists Cloud Logging export sinks in a Google Cloud project.
:param project_id: Required. The ID of the Google Cloud project to list sinks from.
:param page_size: Optional. The maximum number of sinks to return per page. Must be greater than 0.
If None, the server will use a default value.
:param gcp_conn_id: Optional. The connection ID used to connect to Google Cloud.
Defaults to "google_cloud_default".
:param impersonation_chain: Optional. Service account or chained list of accounts to impersonate.
If a string, the service account must grant the originating account the
'Service Account Token Creator' IAM role.
If a sequence, each account in the chain must grant this role to the next.
The first account must grant it to the originating account (templated).
"""
template_fields: Sequence[str] = ("project_id", "gcp_conn_id", "impersonation_chain", "page_size")
def __init__(
self,
project_id: str,
page_size: int | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.project_id = project_id
self.page_size = page_size
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> list[dict[str, Any]]:
"""Execute the operator."""
_validate_inputs(self, ["project_id"])
if self.page_size is not None and self.page_size < 1:
raise AirflowException("The page_size for the list sinks request must be greater than zero")
hook = CloudLoggingHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
try:
self.log.info("Listing log sinks in project '%s'", self.project_id)
sinks = hook.list_sinks(project_id=self.project_id, page_size=self.page_size)
result = [LogSink.to_dict(sink) for sink in sinks]
self.log.info("Found %d log sinks", len(result))
return result
except google.cloud.exceptions.GoogleCloudError as e:
self.log.error("An error occurred. Exiting.")
raise e
| CloudLoggingListSinksOperator |
python | ray-project__ray | python/ray/train/tests/test_gpu_2.py | {
"start": 512,
"end": 860
} | class ____(LinearDataset):
"""Modifies the LinearDataset to also return non-tensor objects."""
def __getitem__(self, index):
return {"x": self.x[index, None], "y": 2}
# Currently in DataParallelTrainers we only report metrics from rank 0.
# For testing purposes here, we need to be able to report from all
# workers.
| NonTensorDataset |
python | google__jax | jax/_src/state/types.py | {
"start": 12014,
"end": 18595
} | class ____(core.AbstractValue):
"""Abstract mutable array reference.
Refer to the `Ref guide`_ for more information.
.. _Ref guide: https://docs.jax.dev/en/latest/array_refs.html
"""
__slots__ = ["inner_aval", "memory_space", "kind"]
def __init__(self, inner_aval: core.AbstractValue, memory_space: Any = None,
kind: Any = None):
self.inner_aval = inner_aval
self.memory_space = memory_space
self.kind = kind
@property
def is_high(self):
return self.inner_aval.is_high
def lo_ty(self):
return map(AbstractRef, self.inner_aval.lo_ty())
def lower_val(self, ref):
if not self.is_high:
return [ref]
return self.inner_aval.lower_val(ref._refs) # type: ignore
def raise_val(self, *vals):
if not self.is_high:
ref, = vals
return ref
return core.Ref(self, self.inner_aval.raise_val(*vals)) # type: ignore
@property
def weak_type(self) -> bool:
if not hasattr(self.inner_aval, "weak_type"):
raise AttributeError
return self.inner_aval.weak_type
def update_weak_type(self, weak_type):
return self.update(inner_aval=self.inner_aval.update_weak_type(weak_type))
def update(self, inner_aval=None, memory_space=None, kind=None):
inner_aval = self.inner_aval if inner_aval is None else inner_aval
memory_space = self.memory_space if memory_space is None else memory_space
kind = self.kind if kind is None else kind
return AbstractRef(inner_aval, memory_space, kind)
ndim = property(lambda self: len(self.shape))
size = property(lambda self: math.prod(self.shape))
def _len(self, ignored_tracer) -> int:
try:
return self.shape[0]
except IndexError as err:
raise TypeError("len() of unsized object") from err # same as numpy error
@property
def shape(self):
try:
return self.inner_aval.shape # pytype: disable=attribute-error
except AttributeError:
raise AttributeError(
f"{self!r} has no `shape`."
) from None
@property
def dtype(self):
try:
return self.inner_aval.dtype # pytype: disable=attribute-error
except AttributeError:
raise AttributeError(
f"{self!r} has no `dtype`."
) from None
@property
def sharding(self):
try:
return self.inner_aval.sharding # pytype: disable=attribute-error
except AttributeError:
raise AttributeError(
f"{self!r} has no `sharding`."
) from None
@property
def vma(self):
try:
return self.inner_aval.vma # pytype: disable=attribute-error
except AttributeError:
raise AttributeError(
f"{self!r} has no `vma`."
) from None
@core.aval_property
def at(self):
return RefIndexer(self)
@core.aval_method
def bitcast(self, dtype):
return TransformedRef(self, ()).bitcast(dtype)
@core.aval_method
def reshape(self, *shape):
return TransformedRef(self, ()).reshape(*shape)
@core.aval_method
def transpose(self, *permutation):
return TransformedRef(self, ()).transpose(*permutation)
@core.aval_property
def T(self):
return TransformedRef(self, ()).T
@core.aval_method
@staticmethod
def get(tracer, idx=()):
from jax._src.state.primitives import ref_get # pytype: disable=import-error
return ref_get(tracer, idx)
@core.aval_method
@staticmethod
def swap(tracer, value, idx=()):
from jax._src.state.primitives import ref_swap # pytype: disable=import-error
return ref_swap(tracer, idx, value)
@core.aval_method
@staticmethod
def set(tracer, value, idx=()):
from jax._src.state.primitives import ref_set # pytype: disable=import-error
return ref_set(tracer, idx, value)
@core.aval_method
@staticmethod
def addupdate(tracer, value, idx=()):
from jax._src.state.primitives import ref_addupdate # pytype: disable=import-error
ref_addupdate(tracer, idx, value)
def _getitem(self, tracer, idx) -> Array:
from jax._src.state.primitives import ref_get # pytype: disable=import-error
return ref_get(tracer, idx)
def _setitem(self, tracer, idx, value) -> None:
from jax._src.state.primitives import ref_set # pytype: disable=import-error
return ref_set(tracer, idx, value)
def _addupdate(self, tracer, idx, value):
from jax._src.state.primitives import ref_addupdate # pytype: disable=import-error
ref_addupdate(tracer, idx, value)
def str_short(self, short_dtypes=False, mesh_axis_types=False) -> str:
inner_aval_str = self.inner_aval.str_short(
short_dtypes=short_dtypes,
mesh_axis_types=mesh_axis_types,
)
if self.memory_space is not None:
return f'Ref<{self.memory_space}>{{{inner_aval_str}}}'
return f'Ref{{{inner_aval_str}}}'
def __repr__(self) -> str:
return self.str_short()
__str__ = __repr__
def to_tangent_aval(self):
return AbstractRef(self.inner_aval.to_tangent_aval(), self.memory_space, kind=self.kind)
def __eq__(self, other):
return (type(self) is type(other) and self.inner_aval == other.inner_aval
and self.memory_space == other.memory_space)
def __hash__(self):
return hash((self.__class__, self.inner_aval, self.memory_space))
def _map_ref(size, axis, ref_aval):
return AbstractRef(core.mapped_aval(size, axis, ref_aval.inner_aval),
ref_aval.memory_space, ref_aval.kind)
def _unmap_ref(size, axis, explicit_mesh_axis, ref_aval):
return AbstractRef(core.unmapped_aval(
size, axis, ref_aval.inner_aval, explicit_mesh_axis),
ref_aval.memory_space, ref_aval.kind)
core.aval_mapping_handlers[AbstractRef] = (_map_ref, _unmap_ref)
def get_ref_state_effects(
avals: Sequence[core.AbstractValue],
effects: core.Effects) -> list[set[StateEffect]]:
return [{eff for eff in effects
if isinstance(eff, (ReadEffect, WriteEffect, AccumEffect))
and eff.input_index == i} for i, _ in enumerate(avals)]
def shaped_array_ref(
shape: tuple[int, ...], dtype, weak_type: bool = False) -> AbstractRef:
return AbstractRef(core.ShapedArray(shape, dtype, weak_type=weak_type))
def _shard_ref(mesh, auto, check_rep, names, ref_aval: AbstractRef):
aval = core.shard_aval(mesh, auto, check_rep, names, ref_aval.inner_aval)
return AbstractRef(aval)
core.shard_aval_handlers[AbstractRef] = _shard_ref
def _unshard_ref(mesh, check_rep, names, ref_aval: AbstractRef):
raise TypeError("can't unshard a ref")
core.unshard_aval_handlers[AbstractRef] = _unshard_ref
# Sentinel type for indicating an uninitialized value.
| AbstractRef |
python | walkccc__LeetCode | solutions/1160. Find Words That Can Be Formed by Characters/1160.py | {
"start": 0,
"end": 346
} | class ____:
def countCharacters(self, words: list[str], chars: str) -> int:
ans = 0
count = collections.Counter(chars)
for word in words:
tempCount = count.copy()
for c in word:
tempCount[c] -= 1
if tempCount[c] < 0:
ans -= len(word)
break
ans += len(word)
return ans
| Solution |
python | jazzband__django-simple-history | simple_history/tests/tests/test_models.py | {
"start": 74416,
"end": 76129
} | class ____(TestCase):
def setUp(self):
self.model = PollWithSeveralManyToMany
self.history_model = self.model.history.model
self.place = Place.objects.create(name="Home")
self.book = Book.objects.create(isbn="1234")
self.restaurant = Restaurant.objects.create(rating=1)
self.poll = PollWithSeveralManyToMany.objects.create(
question="what's up?", pub_date=today
)
def test_separation(self):
self.assertEqual(self.poll.history.all().count(), 1)
self.poll.places.add(self.place)
self.poll.books.add(self.book)
self.poll.restaurants.add(self.restaurant)
self.assertEqual(self.poll.history.all().count(), 4)
restaurant, book, place, add = self.poll.history.all()
self.assertEqual(restaurant.restaurants.all().count(), 1)
self.assertEqual(restaurant.books.all().count(), 1)
self.assertEqual(restaurant.places.all().count(), 1)
self.assertEqual(restaurant.restaurants.first().restaurant, self.restaurant)
self.assertEqual(book.restaurants.all().count(), 0)
self.assertEqual(book.books.all().count(), 1)
self.assertEqual(book.places.all().count(), 1)
self.assertEqual(book.books.first().book, self.book)
self.assertEqual(place.restaurants.all().count(), 0)
self.assertEqual(place.books.all().count(), 0)
self.assertEqual(place.places.all().count(), 1)
self.assertEqual(place.places.first().place, self.place)
self.assertEqual(add.restaurants.all().count(), 0)
self.assertEqual(add.books.all().count(), 0)
self.assertEqual(add.places.all().count(), 0)
| SeveralManyToManyTest |
python | pyca__cryptography | tests/hazmat/primitives/test_hash_vectors.py | {
"start": 864,
"end": 1212
} | class ____:
test_sha224 = generate_hash_test(
load_hash_vectors,
os.path.join("hashes", "SHA2"),
["SHA224LongMsg.rsp", "SHA224ShortMsg.rsp"],
hashes.SHA224(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(hashes.SHA256()),
skip_message="Does not support SHA256",
)
| TestSHA224 |
python | numpy__numpy | numpy/matrixlib/tests/test_matrix_linalg.py | {
"start": 2180,
"end": 2231
} | class ____(_TestQR):
array = np.matrix
| TestQRMatrix |
python | spack__spack | lib/spack/spack/filesystem_view.py | {
"start": 30844,
"end": 31208
} | class ____(SpackError):
"""Raised when a view has a projections file and is given one manually."""
def is_folder_on_case_insensitive_filesystem(path: str) -> bool:
with tempfile.NamedTemporaryFile(dir=path, prefix=".sentinel") as sentinel:
return os.path.exists(os.path.join(path, os.path.basename(sentinel.name).upper()))
| ConflictingProjectionsError |
python | ApeWorX__ape | src/ape_ethereum/multicall/exceptions.py | {
"start": 42,
"end": 93
} | class ____(ApeException):
pass
| MulticallException |
python | aio-libs__aiohttp | aiohttp/streams.py | {
"start": 16716,
"end": 18337
} | class ____(StreamReader): # lgtm [py/missing-call-to-init]
__slots__ = ("_read_eof_chunk",)
def __init__(self) -> None:
self._read_eof_chunk = False
self.total_bytes = 0
def __repr__(self) -> str:
return "<%s>" % self.__class__.__name__
def exception(self) -> BaseException | None:
return None
def set_exception(
self,
exc: type[BaseException] | BaseException,
exc_cause: BaseException = _EXC_SENTINEL,
) -> None:
pass
def on_eof(self, callback: Callable[[], None]) -> None:
try:
callback()
except Exception:
internal_logger.exception("Exception in eof callback")
def feed_eof(self) -> None:
pass
def is_eof(self) -> bool:
return True
def at_eof(self) -> bool:
return True
async def wait_eof(self) -> None:
return
def feed_data(self, data: bytes) -> None:
pass
async def readline(self) -> bytes:
return b""
async def read(self, n: int = -1) -> bytes:
return b""
# TODO add async def readuntil
async def readany(self) -> bytes:
return b""
async def readchunk(self) -> tuple[bytes, bool]:
if not self._read_eof_chunk:
self._read_eof_chunk = True
return (b"", False)
return (b"", True)
async def readexactly(self, n: int) -> bytes:
raise asyncio.IncompleteReadError(b"", n)
def read_nowait(self, n: int = -1) -> bytes:
return b""
EMPTY_PAYLOAD: Final[StreamReader] = EmptyStreamReader()
| EmptyStreamReader |
python | spyder-ide__spyder | spyder/plugins/remoteclient/api/protocol.py | {
"start": 1378,
"end": 1465
} | class ____(typing.TypedDict):
id: str
status: str
message: str
| ConnectionInfo |
python | python-openxml__python-docx | src/docx/table.py | {
"start": 18546,
"end": 19477
} | class ____(Parented):
"""Sequence of |_Row| objects corresponding to the rows in a table.
Supports ``len()``, iteration, indexed access, and slicing.
"""
def __init__(self, tbl: CT_Tbl, parent: TableParent):
super(_Rows, self).__init__(parent)
self._parent = parent
self._tbl = tbl
@overload
def __getitem__(self, idx: int) -> _Row: ...
@overload
def __getitem__(self, idx: slice) -> list[_Row]: ...
def __getitem__(self, idx: int | slice) -> _Row | list[_Row]:
"""Provide indexed access, (e.g. `rows[0]` or `rows[1:3]`)"""
return list(self)[idx]
def __iter__(self):
return (_Row(tr, self) for tr in self._tbl.tr_lst)
def __len__(self):
return len(self._tbl.tr_lst)
@property
def table(self) -> Table:
"""Reference to the |Table| object this row collection belongs to."""
return self._parent.table
| _Rows |
python | ray-project__ray | python/ray/serve/_private/common.py | {
"start": 25739,
"end": 26089
} | class ____:
"""Information about the request routing.
It includes deployment name (from ReplicaID), replica tag (from ReplicaID),
multiplex model ids, and routing stats.
"""
replica_id: ReplicaID
multiplexed_model_ids: Optional[List[str]] = None
routing_stats: Optional[Dict[str, Any]] = None
@dataclass
| RequestRoutingInfo |
python | readthedocs__readthedocs.org | readthedocs/projects/forms.py | {
"start": 43972,
"end": 45749
} | class ____(forms.ModelForm):
"""
Form to add an EnvironmentVariable to a Project.
This limits the name of the variable.
"""
project = forms.CharField(widget=forms.HiddenInput(), required=False)
class Meta:
model = EnvironmentVariable
fields = ("name", "value", "public", "project")
def __init__(self, *args, **kwargs):
self.project = kwargs.pop("project", None)
super().__init__(*args, **kwargs)
# Remove the nullable option from the form.
# TODO: remove after migration.
self.fields["public"].widget = forms.CheckboxInput()
self.fields["public"].empty_value = False
def clean_project(self):
return self.project
def clean_name(self):
"""Validate environment variable name chosen."""
name = self.cleaned_data["name"]
if name.startswith("__"):
raise forms.ValidationError(
_("Variable name can't start with __ (double underscore)"),
)
if name.startswith("READTHEDOCS"):
raise forms.ValidationError(
_("Variable name can't start with READTHEDOCS"),
)
if self.project.environmentvariable_set.filter(name=name).exists():
raise forms.ValidationError(
_(
"There is already a variable with this name for this project",
),
)
if " " in name:
raise forms.ValidationError(
_("Variable name can't contain spaces"),
)
if not fullmatch("[a-zA-Z0-9_]+", name):
raise forms.ValidationError(
_("Only letters, numbers and underscore are allowed"),
)
return name
| EnvironmentVariableForm |
python | jazzband__django-simple-history | simple_history/tests/tests/test_models.py | {
"start": 63320,
"end": 63752
} | class ____(TestCase):
@staticmethod
def get_table_name(manager):
return manager.model._meta.db_table
def test_custom_table_name(self):
self.assertEqual(self.get_table_name(Contact.history), "contacts_history")
def test_custom_table_name_from_register(self):
self.assertEqual(
self.get_table_name(ContactRegister.history), "contacts_register_history"
)
| CustomTableNameTest1 |
python | sympy__sympy | sympy/printing/fortran.py | {
"start": 2083,
"end": 28982
} | class ____(CodePrinter):
"""A printer to convert SymPy expressions to strings of Fortran code"""
printmethod = "_fcode"
language = "Fortran"
type_aliases = {
integer: int32,
real: float64,
complex_: complex128,
}
type_mappings = {
intc: 'integer(c_int)',
float32: 'real*4', # real(kind(0.e0))
float64: 'real*8', # real(kind(0.d0))
float80: 'real*10', # real(kind(????))
complex64: 'complex*8',
complex128: 'complex*16',
int8: 'integer*1',
int16: 'integer*2',
int32: 'integer*4',
int64: 'integer*8',
bool_: 'logical'
}
type_modules = {
intc: {'iso_c_binding': 'c_int'}
}
_default_settings: dict[str, Any] = dict(CodePrinter._default_settings, **{
'precision': 17,
'user_functions': {},
'source_format': 'fixed',
'contract': True,
'standard': 77,
'name_mangling': True,
})
_operators = {
'and': '.and.',
'or': '.or.',
'xor': '.neqv.',
'equivalent': '.eqv.',
'not': '.not. ',
}
_relationals = {
'!=': '/=',
}
def __init__(self, settings=None):
if not settings:
settings = {}
self.mangled_symbols = {} # Dict showing mapping of all words
self.used_name = []
self.type_aliases = dict(chain(self.type_aliases.items(),
settings.pop('type_aliases', {}).items()))
self.type_mappings = dict(chain(self.type_mappings.items(),
settings.pop('type_mappings', {}).items()))
super().__init__(settings)
self.known_functions = dict(known_functions)
userfuncs = settings.get('user_functions', {})
self.known_functions.update(userfuncs)
# leading columns depend on fixed or free format
standards = {66, 77, 90, 95, 2003, 2008}
if self._settings['standard'] not in standards:
raise ValueError("Unknown Fortran standard: %s" % self._settings[
'standard'])
self.module_uses = defaultdict(set) # e.g.: use iso_c_binding, only: c_int
@property
def _lead(self):
if self._settings['source_format'] == 'fixed':
return {'code': " ", 'cont': " @ ", 'comment': "C "}
elif self._settings['source_format'] == 'free':
return {'code': "", 'cont': " ", 'comment': "! "}
else:
raise ValueError("Unknown source format: %s" % self._settings['source_format'])
def _print_Symbol(self, expr):
if self._settings['name_mangling'] == True:
if expr not in self.mangled_symbols:
name = expr.name
while name.lower() in self.used_name:
name += '_'
self.used_name.append(name.lower())
if name == expr.name:
self.mangled_symbols[expr] = expr
else:
self.mangled_symbols[expr] = Symbol(name)
expr = expr.xreplace(self.mangled_symbols)
name = super()._print_Symbol(expr)
return name
def _rate_index_position(self, p):
return -p*5
def _get_statement(self, codestring):
return codestring
def _get_comment(self, text):
return "! {}".format(text)
def _declare_number_const(self, name, value):
return "parameter ({} = {})".format(name, self._print(value))
def _print_NumberSymbol(self, expr):
# A Number symbol that is not implemented here or with _printmethod
# is registered and evaluated
self._number_symbols.add((expr, Float(expr.evalf(self._settings['precision']))))
return str(expr)
def _format_code(self, lines):
return self._wrap_fortran(self.indent_code(lines))
def _traverse_matrix_indices(self, mat):
rows, cols = mat.shape
return ((i, j) for j in range(cols) for i in range(rows))
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
for i in indices:
# fortran arrays start at 1 and end at dimension
var, start, stop = map(self._print,
[i.label, i.lower + 1, i.upper + 1])
open_lines.append("do %s = %s, %s" % (var, start, stop))
close_lines.append("end do")
return open_lines, close_lines
def _print_sign(self, expr):
from sympy.functions.elementary.complexes import Abs
arg, = expr.args
if arg.is_integer:
new_expr = merge(0, isign(1, arg), Eq(arg, 0))
elif (arg.is_complex or arg.is_infinite):
new_expr = merge(cmplx(literal_dp(0), literal_dp(0)), arg/Abs(arg), Eq(Abs(arg), literal_dp(0)))
else:
new_expr = merge(literal_dp(0), dsign(literal_dp(1), arg), Eq(arg, literal_dp(0)))
return self._print(new_expr)
def _print_Piecewise(self, expr):
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
lines = []
if expr.has(Assignment):
for i, (e, c) in enumerate(expr.args):
if i == 0:
lines.append("if (%s) then" % self._print(c))
elif i == len(expr.args) - 1 and c == True:
lines.append("else")
else:
lines.append("else if (%s) then" % self._print(c))
lines.append(self._print(e))
lines.append("end if")
return "\n".join(lines)
elif self._settings["standard"] >= 95:
# Only supported in F95 and newer:
# The piecewise was used in an expression, need to do inline
# operators. This has the downside that inline operators will
# not work for statements that span multiple lines (Matrix or
# Indexed expressions).
pattern = "merge({T}, {F}, {COND})"
code = self._print(expr.args[-1].expr)
terms = list(expr.args[:-1])
while terms:
e, c = terms.pop()
expr = self._print(e)
cond = self._print(c)
code = pattern.format(T=expr, F=code, COND=cond)
return code
else:
# `merge` is not supported prior to F95
raise NotImplementedError("Using Piecewise as an expression using "
"inline operators is not supported in "
"standards earlier than Fortran95.")
def _print_MatrixElement(self, expr):
return "{}({}, {})".format(self.parenthesize(expr.parent,
PRECEDENCE["Atom"], strict=True), expr.i + 1, expr.j + 1)
def _print_Add(self, expr):
# purpose: print complex numbers nicely in Fortran.
# collect the purely real and purely imaginary parts:
pure_real = []
pure_imaginary = []
mixed = []
for arg in expr.args:
if arg.is_number and arg.is_real:
pure_real.append(arg)
elif arg.is_number and arg.is_imaginary:
pure_imaginary.append(arg)
else:
mixed.append(arg)
if pure_imaginary:
if mixed:
PREC = precedence(expr)
term = Add(*mixed)
t = self._print(term)
if t.startswith('-'):
sign = "-"
t = t[1:]
else:
sign = "+"
if precedence(term) < PREC:
t = "(%s)" % t
return "cmplx(%s,%s) %s %s" % (
self._print(Add(*pure_real)),
self._print(-S.ImaginaryUnit*Add(*pure_imaginary)),
sign, t,
)
else:
return "cmplx(%s,%s)" % (
self._print(Add(*pure_real)),
self._print(-S.ImaginaryUnit*Add(*pure_imaginary)),
)
else:
return CodePrinter._print_Add(self, expr)
def _print_Function(self, expr):
# All constant function args are evaluated as floats
prec = self._settings['precision']
args = [N(a, prec) for a in expr.args]
eval_expr = expr.func(*args)
if not isinstance(eval_expr, Function):
return self._print(eval_expr)
else:
return CodePrinter._print_Function(self, expr.func(*args))
def _print_Mod(self, expr):
# NOTE : Fortran has the functions mod() and modulo(). modulo() behaves
# the same wrt to the sign of the arguments as Python and SymPy's
# modulus computations (% and Mod()) but is not available in Fortran 66
# or Fortran 77, thus we raise an error.
if self._settings['standard'] in [66, 77]:
msg = ("Python % operator and SymPy's Mod() function are not "
"supported by Fortran 66 or 77 standards.")
raise NotImplementedError(msg)
else:
x, y = expr.args
return " modulo({}, {})".format(self._print(x), self._print(y))
def _print_ImaginaryUnit(self, expr):
# purpose: print complex numbers nicely in Fortran.
return "cmplx(0,1)"
def _print_int(self, expr):
return str(expr)
def _print_Mul(self, expr):
# purpose: print complex numbers nicely in Fortran.
if expr.is_number and expr.is_imaginary:
return "cmplx(0,%s)" % (
self._print(-S.ImaginaryUnit*expr)
)
else:
return CodePrinter._print_Mul(self, expr)
def _print_Pow(self, expr):
PREC = precedence(expr)
if equal_valued(expr.exp, -1):
return '%s/%s' % (
self._print(literal_dp(1)),
self.parenthesize(expr.base, PREC)
)
elif equal_valued(expr.exp, 0.5):
if expr.base.is_integer:
# Fortran intrinsic sqrt() does not accept integer argument
if expr.base.is_Number:
return 'sqrt(%s.0d0)' % self._print(expr.base)
else:
return 'sqrt(dble(%s))' % self._print(expr.base)
else:
return 'sqrt(%s)' % self._print(expr.base)
else:
return CodePrinter._print_Pow(self, expr)
def _print_Rational(self, expr):
p, q = int(expr.p), int(expr.q)
return "%d.0d0/%d.0d0" % (p, q)
def _print_Float(self, expr):
printed = CodePrinter._print_Float(self, expr)
e = printed.find('e')
if e > -1:
return "%sd%s" % (printed[:e], printed[e + 1:])
return "%sd0" % printed
def _print_Relational(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
op = expr.rel_op
op = op if op not in self._relationals else self._relationals[op]
return "{} {} {}".format(lhs_code, op, rhs_code)
def _print_Indexed(self, expr):
inds = [ self._print(i) for i in expr.indices ]
return "%s(%s)" % (self._print(expr.base.label), ", ".join(inds))
def _print_AugmentedAssignment(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
return self._get_statement("{0} = {0} {1} {2}".format(
self._print(lhs_code), self._print(expr.binop), self._print(rhs_code)))
def _print_sum_(self, sm):
params = self._print(sm.array)
if sm.dim != None: # Must use '!= None', cannot use 'is not None'
params += ', ' + self._print(sm.dim)
if sm.mask != None: # Must use '!= None', cannot use 'is not None'
params += ', mask=' + self._print(sm.mask)
return '%s(%s)' % (sm.__class__.__name__.rstrip('_'), params)
def _print_product_(self, prod):
return self._print_sum_(prod)
def _print_Do(self, do):
excl = ['concurrent']
if do.step == 1:
excl.append('step')
step = ''
else:
step = ', {step}'
return (
'do {concurrent}{counter} = {first}, {last}'+step+'\n'
'{body}\n'
'end do\n'
).format(
concurrent='concurrent ' if do.concurrent else '',
**do.kwargs(apply=lambda arg: self._print(arg), exclude=excl)
)
def _print_ImpliedDoLoop(self, idl):
step = '' if idl.step == 1 else ', {step}'
return ('({expr}, {counter} = {first}, {last}'+step+')').format(
**idl.kwargs(apply=lambda arg: self._print(arg))
)
def _print_For(self, expr):
target = self._print(expr.target)
if isinstance(expr.iterable, Range):
start, stop, step = expr.iterable.args
else:
raise NotImplementedError("Only iterable currently supported is Range")
body = self._print(expr.body)
return ('do {target} = {start}, {stop}, {step}\n'
'{body}\n'
'end do').format(target=target, start=start, stop=stop - 1,
step=step, body=body)
def _print_Type(self, type_):
type_ = self.type_aliases.get(type_, type_)
type_str = self.type_mappings.get(type_, type_.name)
module_uses = self.type_modules.get(type_)
if module_uses:
for k, v in module_uses:
self.module_uses[k].add(v)
return type_str
def _print_Element(self, elem):
return '{symbol}({idxs})'.format(
symbol=self._print(elem.symbol),
idxs=', '.join((self._print(arg) for arg in elem.indices))
)
def _print_Extent(self, ext):
return str(ext)
def _print_Declaration(self, expr):
var = expr.variable
val = var.value
dim = var.attr_params('dimension')
intents = [intent in var.attrs for intent in (intent_in, intent_out, intent_inout)]
if intents.count(True) == 0:
intent = ''
elif intents.count(True) == 1:
intent = ', intent(%s)' % ['in', 'out', 'inout'][intents.index(True)]
else:
raise ValueError("Multiple intents specified for %s" % self)
if isinstance(var, Pointer):
raise NotImplementedError("Pointers are not available by default in Fortran.")
if self._settings["standard"] >= 90:
result = '{t}{vc}{dim}{intent}{alloc} :: {s}'.format(
t=self._print(var.type),
vc=', parameter' if value_const in var.attrs else '',
dim=', dimension(%s)' % ', '.join((self._print(arg) for arg in dim)) if dim else '',
intent=intent,
alloc=', allocatable' if allocatable in var.attrs else '',
s=self._print(var.symbol)
)
if val != None: # Must be "!= None", cannot be "is not None"
result += ' = %s' % self._print(val)
else:
if value_const in var.attrs or val:
raise NotImplementedError("F77 init./parameter statem. req. multiple lines.")
result = ' '.join((self._print(arg) for arg in [var.type, var.symbol]))
return result
def _print_Infinity(self, expr):
return '(huge(%s) + 1)' % self._print(literal_dp(0))
def _print_While(self, expr):
return 'do while ({condition})\n{body}\nend do'.format(**expr.kwargs(
apply=lambda arg: self._print(arg)))
def _print_BooleanTrue(self, expr):
return '.true.'
def _print_BooleanFalse(self, expr):
return '.false.'
def _pad_leading_columns(self, lines):
result = []
for line in lines:
if line.startswith('!'):
result.append(self._lead['comment'] + line[1:].lstrip())
else:
result.append(self._lead['code'] + line)
return result
def _wrap_fortran(self, lines):
"""Wrap long Fortran lines
Argument:
lines -- a list of lines (without \\n character)
A comment line is split at white space. Code lines are split with a more
complex rule to give nice results.
"""
# routine to find split point in a code line
my_alnum = set("_+-." + string.digits + string.ascii_letters)
my_white = set(" \t()")
def split_pos_code(line, endpos):
if len(line) <= endpos:
return len(line)
pos = endpos
split = lambda pos: \
(line[pos] in my_alnum and line[pos - 1] not in my_alnum) or \
(line[pos] not in my_alnum and line[pos - 1] in my_alnum) or \
(line[pos] in my_white and line[pos - 1] not in my_white) or \
(line[pos] not in my_white and line[pos - 1] in my_white)
while not split(pos):
pos -= 1
if pos == 0:
return endpos
return pos
# split line by line and add the split lines to result
result = []
if self._settings['source_format'] == 'free':
trailing = ' &'
else:
trailing = ''
for line in lines:
if line.startswith(self._lead['comment']):
# comment line
if len(line) > 72:
pos = line.rfind(" ", 6, 72)
if pos == -1:
pos = 72
hunk = line[:pos]
line = line[pos:].lstrip()
result.append(hunk)
while line:
pos = line.rfind(" ", 0, 66)
if pos == -1 or len(line) < 66:
pos = 66
hunk = line[:pos]
line = line[pos:].lstrip()
result.append("%s%s" % (self._lead['comment'], hunk))
else:
result.append(line)
elif line.startswith(self._lead['code']):
# code line
pos = split_pos_code(line, 72)
hunk = line[:pos].rstrip()
line = line[pos:].lstrip()
if line:
hunk += trailing
result.append(hunk)
while line:
pos = split_pos_code(line, 65)
hunk = line[:pos].rstrip()
line = line[pos:].lstrip()
if line:
hunk += trailing
result.append("%s%s" % (self._lead['cont'], hunk))
else:
result.append(line)
return result
def indent_code(self, code):
    """Accepts a string of code or a list of code lines

    Strips existing leading whitespace and re-indents according to the
    nesting of Fortran block constructs (``do``/``if``/``program``/
    ``interface``), honouring continuation lines and the configured
    source format (fixed vs. free form).
    """
    if isinstance(code, str):
        # Recurse on the split lines, then reassemble into one string.
        code_lines = self.indent_code(code.splitlines(True))
        return ''.join(code_lines)
    free = self._settings['source_format'] == 'free'
    code = [ line.lstrip(' \t') for line in code ]

    # Keywords that open a block (indent the lines that follow) and that
    # close one (dedent the line itself); 'else' appears in both.
    inc_keyword = ('do ', 'if(', 'if ', 'do\n', 'else', 'program', 'interface')
    dec_keyword = ('end do', 'enddo', 'end if', 'endif', 'else', 'end program', 'end interface')

    increase = [ int(any(map(line.startswith, inc_keyword)))
                 for line in code ]
    decrease = [ int(any(map(line.startswith, dec_keyword)))
                 for line in code ]
    continuation = [ int(any(map(line.endswith, ['&', '&\n'])))
                     for line in code ]

    level = 0
    cont_padding = 0
    tabwidth = 3
    new_code = []
    for i, line in enumerate(code):
        if line in ('', '\n'):
            new_code.append(line)
            continue
        level -= decrease[i]

        if free:
            padding = " "*(level*tabwidth + cont_padding)
        else:
            # fixed form: the leading columns are added afterwards by
            # _pad_leading_columns
            padding = " "*level*tabwidth

        line = "%s%s" % (padding, line)
        if not free:
            line = self._pad_leading_columns([line])[0]

        new_code.append(line)

        # the line after a continuation gets extra padding (free form)
        if continuation[i]:
            cont_padding = 2*tabwidth
        else:
            cont_padding = 0
        level += increase[i]

    if not free:
        # fixed form additionally needs wrapping to the column limit
        return self._wrap_fortran(new_code)
    return new_code
def _print_GoTo(self, goto):
    """Print a Fortran ``go to`` statement, plain or computed."""
    if not goto.expr:
        # plain goto: exactly one target label
        label, = goto.labels
        return "go to %s" % self._print(label)
    # computed goto: jump to the n-th label selected by the expression
    labels = ', '.join(self._print(lbl) for lbl in goto.labels)
    return "go to ({}), {}".format(labels, self._print(goto.expr))
def _print_Program(self, prog):
    """Render a Fortran ``program`` unit from a Program node."""
    printed = prog.kwargs(apply=lambda arg: self._print(arg))
    return "program {name}\n{body}\nend program\n".format(**printed)
def _print_Module(self, mod):
    """Render a Fortran ``module`` unit: declarations, then definitions
    after ``contains``."""
    printed = mod.kwargs(apply=lambda arg: self._print(arg))
    return (
        "module {name}\n{declarations}\n\ncontains\n\n"
        "{definitions}\nend module\n"
    ).format(**printed)
def _print_Stream(self, strm):
    """Map a Stream node to the Fortran unit it is addressed by.

    For standard >= 2003 the named unit constants are returned and the
    corresponding import is recorded in ``self.module_uses``; for older
    standards stdout falls back to the ``*`` default unit and any other
    stream is addressed by its own name.
    """
    if strm.name == 'stdout' and self._settings["standard"] >= 2003:
        # Bug fix: stdout is the *output* unit; the previous code
        # returned 'input_unit', which is standard input.
        # NOTE(review): these named constants live in iso_fortran_env,
        # not iso_c_binding, and the 'stdint=>' rename looks suspect --
        # confirm against the consumer of module_uses.
        self.module_uses['iso_c_binding'].add('stdint=>output_unit')
        return 'output_unit'
    elif strm.name == 'stderr' and self._settings["standard"] >= 2003:
        self.module_uses['iso_c_binding'].add('stdint=>error_unit')
        return 'error_unit'
    else:
        if strm.name == 'stdout':
            return '*'
        else:
            return strm.name
def _print_Print(self, ps):
    """Print a Fortran output statement for a Print node."""
    # `none` is sympy's singleton Token; equality (==), not identity,
    # is the required comparison here.
    if ps.format_string == none:
        template = "print {fmt}, {iolist}"
        fmt = '*'
    else:
        unit = {stderr: '0', stdout: '6'}.get(ps.file, '*')
        template = 'write(%(out)s, fmt="{fmt}", advance="no"), {iolist}' % {'out': unit}
        fmt = self._print(ps.format_string)
    iolist = ', '.join(self._print(arg) for arg in ps.print_args)
    return template.format(fmt=fmt, iolist=iolist)
def _print_Return(self, rs):
    """Assign the returned expression to the function's result variable."""
    value, = rs.args
    target = self._context.get('result_name', 'sympy_result')
    return "%s = %s" % (target, self._print(value))
def _print_FortranReturn(self, frs):
    """Print an explicit Fortran ``return``, with optional argument."""
    value, = frs.args
    return 'return %s' % self._print(value) if value else 'return'
def _head(self, entity, fp, **kwargs):
    """Return the header line plus argument declarations shared by the
    function/subroutine printers.

    ``entity`` is the leading keyword text (e.g. ``"subroutine "`` or
    ``"<type> function "``); ``fp`` is the node whose name, parameters
    and attributes are printed.
    """
    # Three cases: None -> no bind clause; falsy (empty) -> bare
    # ``bind(C)``; otherwise the first element is used as the C name.
    bind_C_params = fp.attr_params('bind_C')
    if bind_C_params is None:
        bind = ''
    else:
        bind = ' bind(C, name="%s")' % bind_C_params[0] if bind_C_params else ' bind(C)'
    result_name = self._settings.get('result_name', None)
    return (
        "{entity}{name}({arg_names}){result}{bind}\n"
        "{arg_declarations}"
    ).format(
        entity=entity,
        name=self._print(fp.name),
        arg_names=', '.join([self._print(arg.symbol) for arg in fp.parameters]),
        result=(' result(%s)' % result_name) if result_name else '',
        bind=bind,
        arg_declarations='\n'.join((self._print(Declaration(arg)) for arg in fp.parameters))
    )
def _print_FunctionPrototype(self, fp):
    """Print a function prototype wrapped in an ``interface`` block."""
    entity = "{} function ".format(self._print(fp.return_type))
    head = self._head(entity, fp)
    return "interface\n{}\nend function\nend interface".format(head)
def _print_FunctionDefinition(self, fd):
    """Print a complete Fortran function definition."""
    # 'elemental' takes precedence over 'pure' when both are present
    prefix = ('elemental ' if elemental in fd.attrs
              else 'pure ' if pure in fd.attrs
              else '')
    entity = "{} function ".format(self._print(fd.return_type))
    # inside the context, Return nodes assign to the function's name
    with printer_context(self, result_name=fd.name):
        return "{}{}\n{}\nend function\n".format(
            prefix, self._head(entity, fd), self._print(fd.body))
def _print_Subroutine(self, sub):
    """Print a complete Fortran subroutine definition."""
    head = self._head('subroutine ', sub)
    body = self._print(sub.body)
    return '%s\n%s\nend subroutine\n' % (head, body)
def _print_SubroutineCall(self, scall):
    """Print a ``call`` statement invoking a subroutine."""
    args = ', '.join(self._print(arg) for arg in scall.subroutine_args)
    return 'call %s(%s)' % (self._print(scall.name), args)
def _print_use_rename(self, rnm):
    """Print a ``use`` rename clause, ``local => original``."""
    printed = tuple(self._print(arg) for arg in rnm.args)
    return "%s => %s" % printed
def _print_use(self, use):
    """Print a ``use`` statement with optional renames and only-list."""
    parts = ['use %s' % self._print(use.namespace)]
    # '!= None' is deliberate: rename/only are sympy objects whose
    # comparison with None is structural, so identity tests misbehave.
    if use.rename != None:
        parts.append(', '.join(self._print(rnm) for rnm in use.rename))
    if use.only != None:
        parts.append('only: ' + ', '.join(self._print(nly) for nly in use.only))
    return ', '.join(parts)
def _print_BreakToken(self, _):
    """Fortran spells ``break`` as ``exit``."""
    return 'exit'
def _print_ContinueToken(self, _):
    """Fortran spells ``continue`` as ``cycle``."""
    return 'cycle'
def _print_ArrayConstructor(self, ac):
    """Print an array constructor; F2003+ uses brackets, older
    standards the ``(/ ... /)`` form."""
    elems = ', '.join(self._print(elem) for elem in ac.elements)
    if self._settings["standard"] >= 2003:
        return "[%s]" % elems
    return '(/%s/)' % elems
def _print_KeywordFunctionCall(self, expr):
    """Print a call mixing positional and keyword arguments."""
    rendered = [self._print(arg) for arg in expr.function_args]
    rendered += [f"{key}={self._print(value)}"
                 for key, value in expr.keyword_args.items()]
    return f"{self._print(expr.name)}({', '.join(rendered)})"
def _print_ArrayElement(self, elem):
    """Print indexed access into an array: ``name(i, j, ...)``."""
    indices = ', '.join(self._print(idx) for idx in elem.indices)
    return '%s(%s)' % (self._print(elem.name), indices)
| FCodePrinter |
python | django__django | tests/order_with_respect_to/models.py | {
"start": 168,
"end": 410
} | class ____(models.Model):
text = models.CharField(max_length=200)
question = models.ForeignKey(Question, models.CASCADE)
class Meta:
order_with_respect_to = "question"
def __str__(self):
return self.text
| Answer |
python | rapidsai__cudf | python/cudf/cudf/core/udf/masked_typing.py | {
"start": 21729,
"end": 22017
} | class ____(MaskedStringViewAttrs):
key = MaskedType(managed_udf_string)
def resolve_value(self, mod):
return managed_udf_string
cuda_decl_registry.register_attr(MaskedStringViewAttrs)
cuda_decl_registry.register_attr(MaskedManagedUDFStringAttrs)
| MaskedManagedUDFStringAttrs |
python | psf__black | tests/data/line_ranges_formatted/basic.py | {
"start": 82,
"end": 233
} | class ____:
def should_also_work(self):
pass
# fmt: on
a = [1, 2] # fmt: skip
# This should cover as many syntaxes as possible.
| Unformatted |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/widgets/base.py | {
"start": 19153,
"end": 20156
} | class ____:
"""
Draw a shadow underneath/behind this container.
(This applies `class:shadow` the the cells under the shadow. The Style
should define the colors for the shadow.)
:param body: Another container object.
"""
def __init__(self, body: AnyContainer) -> None:
self.container = FloatContainer(
content=body,
floats=[
Float(
bottom=-1,
height=1,
left=1,
right=-1,
transparent=True,
content=Window(style="class:shadow"),
),
Float(
bottom=-1,
top=1,
width=1,
right=-1,
transparent=True,
content=Window(style="class:shadow"),
),
],
)
def __pt_container__(self) -> Container:
return self.container
| Shadow |
python | huggingface__transformers | src/transformers/models/exaone4/modular_exaone4.py | {
"start": 20987,
"end": 21283
} | class ____(LlamaForQuestionAnswering):
pass
__all__ = [
"Exaone4Config",
"Exaone4PreTrainedModel",
"Exaone4Model",
"Exaone4ForCausalLM",
"Exaone4ForSequenceClassification",
"Exaone4ForTokenClassification",
"Exaone4ForQuestionAnswering",
]
| Exaone4ForQuestionAnswering |
python | FactoryBoy__factory_boy | tests/djapp/models.py | {
"start": 2781,
"end": 2856
} | class ____(AbstractWithCustomManager):
pass
| FromAbstractWithCustomManager |
python | plotly__plotly.py | tests/test_optional/test_tools/test_figure_factory.py | {
"start": 57629,
"end": 77829
} | class ____(TestCaseNoTemplate, NumpyTestUtilsMixin):
def test_validate_gantt(self):
# validate the basic gantt inputs
df = [
{
"Task": "Job A",
"Start": "2009-02-01",
"Finish": "2009-08-30",
"Complete": "a",
}
]
pattern2 = (
"In order to use an indexing column and assign colors to "
"the values of the index, you must choose an actual "
"column name in the dataframe or key if a list of "
"dictionaries is being used."
)
self.assertRaisesRegex(
PlotlyError, pattern2, ff.create_gantt, df, index_col="foo"
)
df = "foo"
pattern3 = "You must input either a dataframe or a list of dictionaries."
self.assertRaisesRegex(PlotlyError, pattern3, ff.create_gantt, df)
df = []
pattern4 = "Your list is empty. It must contain at least one dictionary."
self.assertRaisesRegex(PlotlyError, pattern4, ff.create_gantt, df)
df = ["foo"]
pattern5 = "Your list must only include dictionaries."
self.assertRaisesRegex(PlotlyError, pattern5, ff.create_gantt, df)
def test_gantt_index(self):
# validate the index used for gantt
df = [
{
"Task": "Job A",
"Start": "2009-02-01",
"Finish": "2009-08-30",
"Complete": 50,
}
]
pattern = (
"In order to use an indexing column and assign colors to "
"the values of the index, you must choose an actual "
"column name in the dataframe or key if a list of "
"dictionaries is being used."
)
self.assertRaisesRegex(
PlotlyError, pattern, ff.create_gantt, df, index_col="foo"
)
df = [
{
"Task": "Job A",
"Start": "2009-02-01",
"Finish": "2009-08-30",
"Complete": "a",
},
{
"Task": "Job A",
"Start": "2009-02-01",
"Finish": "2009-08-30",
"Complete": 50,
},
]
pattern2 = (
"Error in indexing column. Make sure all entries of each "
"column are all numbers or all strings."
)
self.assertRaisesRegex(
PlotlyError, pattern2, ff.create_gantt, df, index_col="Complete"
)
def test_gantt_validate_colors(self):
# validate the gantt colors variable
df = [
{
"Task": "Job A",
"Start": "2009-02-01",
"Finish": "2009-08-30",
"Complete": 75,
"Resource": "A",
},
{
"Task": "Job B",
"Start": "2009-02-01",
"Finish": "2009-08-30",
"Complete": 50,
"Resource": "B",
},
]
pattern = "Whoops! The elements in your rgb colors tuples cannot exceed 255.0."
self.assertRaisesRegex(
PlotlyError,
pattern,
ff.create_gantt,
df,
index_col="Complete",
colors="rgb(300,1,1)",
)
self.assertRaises(
PlotlyError, ff.create_gantt, df, index_col="Complete", colors="foo"
)
pattern2 = "Whoops! The elements in your colors tuples cannot exceed 1.0."
self.assertRaisesRegex(
PlotlyError,
pattern2,
ff.create_gantt,
df,
index_col="Complete",
colors=(2, 1, 1),
)
# verify that if colors is a dictionary, its keys span all the
# values in the index column
colors_dict = {75: "rgb(1, 2, 3)"}
pattern3 = (
"If you are using colors as a dictionary, all of its "
"keys must be all the values in the index column."
)
self.assertRaisesRegex(
PlotlyError,
pattern3,
ff.create_gantt,
df,
index_col="Complete",
colors=colors_dict,
)
# check: index is set if colors is a dictionary
colors_dict_good = {50: "rgb(1, 2, 3)", 75: "rgb(5, 10, 15)"}
pattern4 = (
"Error. You have set colors to a dictionary but have not "
"picked an index. An index is required if you are "
"assigning colors to particular values in a dictionary."
)
self.assertRaisesRegex(
PlotlyError, pattern4, ff.create_gantt, df, colors=colors_dict_good
)
# check: number of colors is equal to or greater than number of
# unique index string values
pattern5 = (
"Error. The number of colors in 'colors' must be no less "
"than the number of unique index values in your group "
"column."
)
self.assertRaisesRegex(
PlotlyError,
pattern5,
ff.create_gantt,
df,
index_col="Resource",
colors=["#ffffff"],
)
# check: if index is numeric, colors has at least 2 colors in it
pattern6 = (
"You must use at least 2 colors in 'colors' if you "
"are using a colorscale. However only the first two "
"colors given will be used for the lower and upper "
"bounds on the colormap."
)
self.assertRaisesRegex(
PlotlyError,
pattern6,
ff.create_gantt,
df,
index_col="Complete",
colors=["#ffffff"],
)
def test_gannt_groups_and_descriptions(self):
# check if grouped gantt chart matches with expected output
df = [
dict(
Task="Task A",
Description="Task A - 1",
Start="2008-10-05",
Finish="2009-04-15",
IndexCol="TA",
),
dict(
Task="Task B",
Description="Task B - 1",
Start="2008-12-06",
Finish="2009-03-15",
IndexCol="TB",
),
dict(
Task="Task C",
Description="Task C - 1",
Start="2008-09-07",
Finish="2009-03-15",
IndexCol="TC",
),
dict(
Task="Task C",
Description="Task C - 2",
Start="2009-05-08",
Finish="2009-04-15",
IndexCol="TC",
),
dict(
Task="Task A",
Description="Task A - 2",
Start="2009-04-20",
Finish="2009-05-30",
IndexCol="TA",
),
]
test_gantt_chart = ff.create_gantt(
df,
colors=dict(TA="rgb(220, 0, 0)", TB="rgb(170, 14, 200)", TC=(1, 0.9, 0.16)),
show_colorbar=True,
index_col="IndexCol",
group_tasks=True,
)
exp_gantt_chart = graph_objs.Figure(
**{
"layout": {
"showlegend": True,
"yaxis": {
"range": [-1, 4],
"zeroline": False,
"ticktext": ["Task C", "Task B", "Task A"],
"tickvals": [0, 1, 2],
"autorange": False,
"showgrid": False,
},
"title": "Gantt Chart",
"height": 600,
"shapes": [],
"width": 900,
"xaxis": {
"zeroline": False,
"rangeselector": {
"buttons": [
{
"count": 7,
"step": "day",
"stepmode": "backward",
"label": "1w",
},
{
"count": 1,
"step": "month",
"stepmode": "backward",
"label": "1m",
},
{
"count": 6,
"step": "month",
"stepmode": "backward",
"label": "6m",
},
{
"count": 1,
"step": "year",
"stepmode": "todate",
"label": "YTD",
},
{
"count": 1,
"step": "year",
"stepmode": "backward",
"label": "1y",
},
{"step": "all"},
]
},
"type": "date",
"showgrid": False,
},
"hovermode": "closest",
},
"data": [
{
"legendgroup": "rgb(170, 14, 200)",
"name": "TB",
"fillcolor": "rgb(170, 14, 200)",
"mode": "none",
"hoverinfo": "name",
"y": [0.8, 0.8, 1.2, 1.2],
"x": ["2008-12-06", "2009-03-15", "2009-03-15", "2008-12-06"],
"fill": "toself",
},
{
"legendgroup": "rgb(220, 0, 0)",
"name": "TA",
"fillcolor": "rgb(220, 0, 0)",
"mode": "none",
"hoverinfo": "name",
"y": [1.8, 1.8, 2.2, 2.2, None, 1.8, 1.8, 2.2, 2.2],
"x": [
"2008-10-05",
"2009-04-15",
"2009-04-15",
"2008-10-05",
"2008-10-05",
"2009-04-20",
"2009-05-30",
"2009-05-30",
"2009-04-20",
],
"fill": "toself",
},
{
"legendgroup": "rgb(255, 230, 41)",
"name": "TC",
"fillcolor": "rgb(255, 230, 41)",
"mode": "none",
"hoverinfo": "name",
"y": [-0.2, -0.2, 0.2, 0.2, None, -0.2, -0.2, 0.2, 0.2],
"x": [
"2008-09-07",
"2009-03-15",
"2009-03-15",
"2008-09-07",
"2008-09-07",
"2009-05-08",
"2009-04-15",
"2009-04-15",
"2009-05-08",
],
"fill": "toself",
},
{
"showlegend": False,
"legendgroup": "rgb(170, 14, 200)",
"name": "",
"text": ["Task B - 1", "Task B - 1"],
"y": [1, 1],
"mode": "markers",
"marker": {
"opacity": 0,
"color": "rgb(170, 14, 200)",
"size": 1,
},
"x": ["2008-12-06", "2009-03-15"],
},
{
"showlegend": False,
"legendgroup": "rgb(220, 0, 0)",
"name": "",
"text": [
"Task A - 1",
"Task A - 1",
"Task A - 2",
"Task A - 2",
],
"y": [2, 2, 2, 2],
"mode": "markers",
"marker": {"opacity": 0, "color": "rgb(220, 0, 0)", "size": 1},
"x": ["2008-10-05", "2009-04-15", "2009-04-20", "2009-05-30"],
},
{
"showlegend": False,
"legendgroup": "rgb(255, 230, 41)",
"name": "",
"text": [
"Task C - 1",
"Task C - 1",
"Task C - 2",
"Task C - 2",
],
"y": [0, 0, 0, 0],
"mode": "markers",
"marker": {
"opacity": 0,
"color": "rgb(255, 230, 41)",
"size": 1,
},
"x": ["2008-09-07", "2009-03-15", "2009-05-08", "2009-04-15"],
},
],
}
)
self.assert_fig_equal(test_gantt_chart["data"][0], exp_gantt_chart["data"][0])
self.assert_fig_equal(test_gantt_chart["data"][1], exp_gantt_chart["data"][1])
self.assert_fig_equal(test_gantt_chart["data"][2], exp_gantt_chart["data"][2])
self.assert_fig_equal(test_gantt_chart["data"][3], exp_gantt_chart["data"][3])
def test_gantt_all_args(self):
# check if gantt chart matches with expected output
df = [
{
"Task": "Run",
"Start": "2010-01-01",
"Finish": "2011-02-02",
"Complete": 0,
},
{
"Task": "Fast",
"Start": "2011-01-01",
"Finish": "2012-06-05",
"Complete": 25,
},
]
test_gantt_chart = ff.create_gantt(
df,
colors="Blues",
index_col="Complete",
reverse_colors=True,
title="Title",
bar_width=0.5,
showgrid_x=True,
showgrid_y=True,
height=500,
width=500,
)
exp_gantt_chart = graph_objs.Figure(
**{
"data": [
{
"x": ["2011-01-01", "2012-06-05", "2012-06-05", "2011-01-01"],
"y": [0.5, 0.5, 1.5, 1.5],
"mode": "none",
"fill": "toself",
"showlegend": False,
"hoverinfo": "name",
"legendgroup": "rgb(166.25, 167.5, 208.0)",
"fillcolor": "rgb(166.25, 167.5, 208.0)",
"name": "25",
},
{
"x": ["2010-01-01", "2011-02-02", "2011-02-02", "2010-01-01"],
"y": [-0.5, -0.5, 0.5, 0.5],
"mode": "none",
"fill": "toself",
"showlegend": False,
"hoverinfo": "name",
"legendgroup": "rgb(220.0, 220.0, 220.0)",
"fillcolor": "rgb(220.0, 220.0, 220.0)",
"name": "0",
},
{
"x": ["2011-01-01", "2012-06-05"],
"y": [1, 1],
"mode": "markers",
"text": [None, None],
"marker": {
"color": "rgb(166.25, 167.5, 208.0)",
"size": 1,
"opacity": 0,
},
"name": "",
"showlegend": False,
"legendgroup": "rgb(166.25, 167.5, 208.0)",
},
{
"x": ["2010-01-01", "2011-02-02"],
"y": [0, 0],
"mode": "markers",
"text": [None, None],
"marker": {
"color": "rgb(220.0, 220.0, 220.0)",
"size": 1,
"opacity": 0,
},
"name": "",
"showlegend": False,
"legendgroup": "rgb(220.0, 220.0, 220.0)",
},
],
"layout": {
"title": "Title",
"showlegend": False,
"height": 500,
"width": 500,
"shapes": [],
"hovermode": "closest",
"yaxis": {
"showgrid": True,
"ticktext": ["Run", "Fast"],
"tickvals": [0, 1],
"range": [-1, 3],
"autorange": False,
"zeroline": False,
},
"xaxis": {
"showgrid": True,
"zeroline": False,
"rangeselector": {
"buttons": [
{
"count": 7,
"label": "1w",
"step": "day",
"stepmode": "backward",
},
{
"count": 1,
"label": "1m",
"step": "month",
"stepmode": "backward",
},
{
"count": 6,
"label": "6m",
"step": "month",
"stepmode": "backward",
},
{
"count": 1,
"label": "YTD",
"step": "year",
"stepmode": "todate",
},
{
"count": 1,
"label": "1y",
"step": "year",
"stepmode": "backward",
},
{"step": "all"},
]
},
"type": "date",
},
},
}
)
self.assert_fig_equal(test_gantt_chart["data"][0], exp_gantt_chart["data"][0])
self.assert_fig_equal(test_gantt_chart["data"][1], exp_gantt_chart["data"][1])
self.assert_fig_equal(test_gantt_chart["data"][2], exp_gantt_chart["data"][2])
self.assert_fig_equal(test_gantt_chart["layout"], exp_gantt_chart["layout"])
| TestGantt |
python | run-llama__llama_index | llama-index-core/llama_index/core/agent/workflow/workflow_events.py | {
"start": 2844,
"end": 3014
} | class ____(Event):
"""Tool call result."""
tool_name: str
tool_kwargs: dict
tool_id: str
tool_output: ToolOutput
return_direct: bool
| ToolCallResult |
python | encode__django-rest-framework | tests/models.py | {
"start": 4043,
"end": 4360
} | class ____(RESTFrameworkModel):
""" Test model where the primary key is a OneToOneField with another model. """
name = models.CharField(max_length=100)
target = models.OneToOneField(
OneToOneTarget, primary_key=True,
related_name='required_source', on_delete=models.CASCADE)
| OneToOnePKSource |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/plan/inputs.py | {
"start": 16902,
"end": 17470
} | class ____(StepInputSource):
"""This step input source is the default value declared on an InputDefinition."""
node_handle: NodeHandle
input_name: str
def _load_value(self, pipeline_def: JobDefinition):
return pipeline_def.get_node(self.node_handle).definition.default_value_for_input(
self.input_name
)
def load_input_object(
self,
step_context: "StepExecutionContext",
input_def: InputDefinition,
) -> Iterator[object]:
yield self._load_value(step_context.job_def)
| FromDefaultValue |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/othervirtual/package.py | {
"start": 217,
"end": 417
} | class ____(Package):
homepage = "http://somewhere.com"
url = "http://somewhere.com/stuff-1.0.tar.gz"
version("1.0", md5="67890abcdef1234567890abcdef12345")
provides("stuff")
| Othervirtual |
python | joke2k__faker | faker/providers/job/pt_PT/__init__.py | {
"start": 76,
"end": 18092
} | class ____(BaseProvider):
jobs = [
"Diretor geral e gestor executivo, de empresas",
"Diretor de vendas",
"Diretores de investigação e desenvolvimento",
"Diretor de produção na agricultura",
"Diretor de produção animal",
"Diretor de produção florestal",
"Diretor de produção na pesca",
"Diretor de produção na aquicultura",
"Diretor das indústrias transformadoras",
"Diretor das indústrias extrativas",
"Diretor das indústrias de construção e de engenharia civil",
"Diretor de compras",
"Diretor de transportes",
"Diretores de armazenagem, distribuição e relacionados",
"Diretores dos serviços das tecnologias da informação e comunicação",
"Diretor dos serviços de cuidados de crianças",
"Diretor dos serviços de cuidados a pessoas idosas",
"Diretor dos serviços de apoio social",
"Diretor dos serviços de educação",
"Diretor de sucursais de bancos, serviços financeiros e de seguros",
"Oficiais e outros profissionais das forças e serviços de segurança, com funções de comando,",
"Diretor e gerente do comércio por grosso",
"Diretor e gerente de outros serviços, n.e.",
"Astrónomo",
"Oceanógrafo",
"Botânico",
"Farmacologista e outros especialistas relacionados",
"Consultor de atividades das pescas",
"Engenheiro de construção de edifícios",
"Engenheiro de obras de engenharia civil",
"Engenheiro mecânico",
"Engenheiro de minas",
"Engenheiro metalúrgico",
"Engenheiro eletrotécnico",
"Engenheiro eletrónico",
"Engenheiro de telecomunicações",
"Designer de têxteis e moda",
"Topógrafo",
"Enfermeiro de cuidados gerais",
"Enfermeiro especialista em reabilitação",
"Enfermeiro especialista em saúde infantil e pediátrica",
"Enfermeiro especialista em saúde mental e psiquiátrica",
"Enfermeiro especialista em enfermagem comunitária",
"Enfermeiro especialista em enfermagem médico cirúrgica",
"Enfermeiro especialista em saúde materna e obstétrica",
"Profissional paramédico",
"Farmacêutico",
"Fisioterapeuta",
"Dietista e nutricionista",
"Audiologista",
"Terapeuta da fala",
"Terapeuta ocupacional",
"Professor dos ensinos básico (2.º e 3.º ciclos) e secundário",
"Professor do ensino básico (1.º ciclo)",
"Educador de infância",
"Especialista em métodos de ensino",
"Professor do ensino especial",
"Especialista em recursos humanos",
"Especialista em formação e desenvolvimento de recursos humanos",
"Especialista em relações públicas",
"Analista de sistemas",
"Programador de software",
"Programador Web e de multimédia",
"Programador de aplicações",
"Administrador e especialista de conceção de base de dados",
"Administrador de sistemas",
"Especialista de redes informáticas",
"Notário",
"Curador de museus",
"Bibliotecários e outros especialistas de informação relacionados",
"Sociólogo",
"Antropólogo",
"Filósofo",
"Psicólogo",
"Especialista do trabalho social",
"Ministro de culto",
"Filólogo",
"Tradutor",
"Intérprete e outros linguistas",
"Escultor",
"Caricaturista",
"Compositor",
"Músico",
"Coreógrafo",
"Diretor de fotografia e de som, montador e relacionados",
"Ator",
"Disc Jockey",
"Técnico de engenharia civil",
"Técnico de eletricidade",
"Técnico de eletrónica",
"Técnico de inspeção de veículos",
"Técnico da metalurgia de base e da indústria extrativa",
"Desenhadores e técnicos afins",
"Técnico de gás",
"Encarregado da indústria extrativa",
"Encarregados das indústrias da madeira e cortiça",
"Encarregados das indústrias da pasta, papel, impressão",
"Encarregados das indústrias refinarias de petróleo",
"Encarregados das indústrias da transformação de minerais não metálicos",
"Encarregado da construção",
"Técnico de operação de instalações de produção de energia",
"Técnico de operação de incineradores",
"Técnico de operação de instalações de tratamento de água",
"Técnico de controlo de instalações da indústria química",
"Técnico de operação de instalações de refinação de petróleo e gás natural",
"Técnico de controlo de instalações de produção de metais",
"Técnico das ciências da vida (excetos ciências médicas)",
"Técnico florestal (inclui cinegético)",
"Oficial maquinista de navios",
"Oficial de convés e piloto de navios",
"Piloto de aeronaves",
"Controlador de tráfego aéreo",
"Técnico de cardiopneumografia",
"Técnico de medicina nuclear",
"Técnico de radioterapia",
"Técnico de análises clínicas",
"Técnico de anatomia patológica, citológica e tanatológica",
"Auxiliar de enfermagem",
"Parteira",
"Profissionais de nível intermédio da medicina tradicional e complementar",
"Terapeuta e assistente dentário",
"Técnico de registos médicos e de informação sobre saúde",
"Técnico dos serviços de saúde comun",
"Técnico e assistente, de fisioterapia",
"Assistente de médicos",
"Pessoal de ambulâncias",
"Corretor de bolsa, cambista",
"Representante comercial",
"Corretor comercial",
"Despachante, transitário",
"Organizador de conferências e eventos",
"Encarregado de armazém",
"Fiscal e encarregado de portagem",
"Supervisor de cargas e descargas",
"Secretário da área jurídica",
"Secretário administrativo e executivo",
"Encarregados das indústrias metalúrgicas de base e fabrico de produtos metálicos",
"Técnico dos serviços de saúde comunitária",
"Secretário da área da medicina",
"Técnico de nível intermédio dos serviços jurídicos e relacionados",
"Técnico de nível intermédio de apoio social",
"Jogador profissional de futebol",
"Ciclista profissional",
"Treinador de desportos",
"Árbitro (juiz) de desportos",
"Fotógrafo",
"Técnicos de galerias, bibliotecas, arquivos e museus",
"Toureiro, cavaleiro tauromáquico e outros profissionais similares",
"Técnico operador das tecnologias de informação e comunicação (TIC)",
"Técnico de apoio aos utilizadores das TIC",
"Técnico em redes e sistemas de computadores",
"Técnico da Web",
"Técnico de emissões de rádio",
"Técnico de emissões de televisão",
"Técnico de gravação audiovisual",
"Técnico de sistemas de comunicações via rádio",
"Técnico de telecomunicações",
"Empregado de escritório em geral",
"Técnico de secretariado",
"Empregado de banca nos casinos e outros empregados de apostas",
"Penhorista e prestamista",
"Empregado das agências de viagem",
"Pessoal de informação administrativa",
"Rececionista, exceto de hotel",
"Outro pessoal de receção e de informação a clientes",
"Operador de contabilidade e escrituração comercial",
"Operador de dados de processamento de pagamentos",
"Empregado de aprovisionamento",
"Empregado de armazém",
"Controlador de transportes terrestres de passageiros",
"Controlador de transportes terrestres de mercadorias",
"Empregado de biblioteca",
"Carteiro",
"Codificador, revisor de provas",
"Escrivão",
"Empregado de serviço de pessoal",
"Outro pessoal de apoio de tipo administrativo, n.e.",
"Fiscal e cobrador de transportes públicos",
"Cozinheiro",
"Cabeleireiro e barbeiro",
"Esteticista",
"Massagista de estética",
"Manicura, pedicura e calista",
"Governante doméstico",
"Pessoal de companhia e ajudantes de quarto",
"Agente funerário",
"Embalsamador",
"Instrutor de condução",
"Vendedor ambulante de produtos alimentares",
"Vendedor em loja (estabelecimento)",
"Operador de caixa",
"Vendedor ao domicílio",
"Assistente de venda de alimentos ao balcão",
"Auxiliar de cuidados de crianças",
"Auxiliar de professor",
"Auxiliar de saúde",
"Ajudante familiar",
"Bombeiro",
"Porteiro de hotelaria",
"Segurança (vigilante privado), outros porteiros",
"Outro pessoal dos serviços de proteção e segurança",
"Agricultor e trabalhador qualificado de cereais e outras culturas extensivas",
"Trabalhador qualificado da jardinagem",
"Produtor e trabalhador qualificado na produção de bovinos",
"Produtor e trabalhador qualificado na produção de ovinos e caprinos",
"Produtor e trabalhador qualificado na produção de suínos",
"Produtor e trabalhador qualificado na produção de outros animais de carne",
"Avicultor e trabalhador qualificado da avicultura",
"Sericicultor e trabalhador qualificado da sericicultura",
"Motosserrista",
"Sapador florestal",
"Encarregado de limpeza e de trabalhos domésticos em escritórios, hotéis e outros",
"Aquicultor (aquacultor)",
"Pescador de águas interiores ",
"Construtor de casas rudimentares",
"Pedreiro",
"Calceteiro",
"Assentador de refratários",
"Polidor da pedra",
"Canteiro",
"Cimenteiro",
"Armador de ferro",
"Montador de alvenarias e de pré",
"Carpinteiro de limpos e de tosco",
"Carpinteiro naval",
"Montador de andaimes",
"Espalhador de betuminosos",
"Colocador de telhados e de coberturas",
"Assentador de tacos e afagador de madeira",
"Ladrilhador",
"Estucador",
"Trabalhador qualificado em isolamentos acústicos e térmicos",
"Vidraceiro",
"Canalizador",
"Montador de tubagens",
"Instalador de ar condicionado e de sistemas de refrigeração",
"Pintor de construções",
"Colocador de papel de parede, pintor decorador",
"Pintor à pistola de superfícies",
"Envernizador",
"Limpador de chaminés e de outras estruturas de edifícios",
"Operador de fundição",
"Trabalhador de corte a oxigas",
"Bate chapa de veículos automóveis",
"Funileiro e caldeireiro",
"Serralheiro civil",
"Outro preparador e montador de estruturas metálicas",
"Forjador e ferreiro",
"Operador de prensa de forjar, estampador",
"Serralheiro de moldes, cunhos, cortantes",
"Regulador e operador de máquinas ferramentas convencionais para trabalhar metais",
"Regulador e operador de máquinas ferramentas de comando numérico computorizado",
"Retificador de rodas, polidor e afiador de metais",
"Mecânico e reparador de veículos automóveis",
"Técnico de manutenção e reparação de motores de avião",
"Mecânico e reparador, de máquinas agrícolas e industriais",
"Reparador de bicicletas",
"Trabalhador qualificado do fabrico e reparação de instrumentos de precisão",
"Trabalhador qualificado do fabrico e afinação de instrumentos musicais",
"Joalheiro",
"Filigranista",
"Soprador de artigos de vidro",
"Cortador de vidro",
"Polidor e acabador de artigos de vidro",
"Lapidador e gravador, de vidro, cerâmica e outros materiais",
"Pintor decorador de vidro, cerâmica e outros materiais",
"Artesão de artigos em madeira",
"Artesão de rendas, bordados e tapeçarias, manuais",
"Artesão de artigos de couro",
"Operador de pré-impressão",
"Eletricista de construções",
"Instalador de sistemas solares térmicos",
"Instalador de sistemas solares fotovoltaicos",
"Instalador de sistemas de bioenergia",
"Eletromecânico, eletricista e outros instaladores de máquinas e equipamentos elétricos",
"Instalador e reparador de linhas elétricas",
"Mecânico e reparador de equipamentos eletrónicos",
"Instalador e reparador, de tecnologias de informação e comunicação",
"Cortador de carne",
"Salsicheiro",
"Preparador e conservador de peixe",
"Padeiro",
"Confeiteiro",
"Trabalhador do fabrico de produtos lácteos",
"Conserveiro de frutas, legumes",
"Trabalhador do tratamento da madeira",
"Marceneiro",
"Tanoeiro, embutidor e outros similares a marceneiro",
"Operador de máquinas e de equipamentos para trabalhar madeira",
"Alfaiate e costureiro",
"Peleiro",
"Chapeleiro",
"Riscador de moldes e cortador de tecidos",
"Bordador",
"Trabalhador de costura",
"Estofador",
"Curtidor de peles",
"Maleiro",
"Correeiro",
"Mergulhador",
"Carregador de fogo e dinamitador",
"Fumigador e outros controladores, de pragas e ervas daninhas",
"Salineiro",
"Trabalhador de fabrico de foguetes (fogueteiro)",
"Mineiro",
"Trabalhador das pedreiras",
"Operador de instalações de processamento de minérios",
"Operador de instalações de processamento de rochas",
"Perfurador de poços, sondador",
"Operador de máquinas para trabalhar o cimento",
"Operador de máquinas para trabalhar a pedra",
"Operador de máquinas para trabalhar outros minerais",
"Operador de instalações de fornos e de primeira transformação de metais",
"Operador de instalações de fornos de segunda fusão, vazadores e laminadores de metais.",
"Operador de instalações de tratamento térmico de metais",
"Operador de instalações de trefilagem e estiragem",
"Operador de máquinas de revestimento, metalização e acabamento de metais",
"Operador de instalações e máquinas para moagem de substâncias químicas",
"Operador de instalações e máquinas para tratamento térmico de produtos químicos",
"Operador de instalações e máquinas,",
"Operador de instalações e máquinas, para reação e verificação de produtos químicos",
"Operador de instalações e máquinas, para petróleo e gás",
"Operador de instalações e máquinas para outros tratamentos químicos",
"Operador de máquinas para o fabrico de produtos de borracha",
"Operador de máquinas para o fabrico de produtos de matérias plásticas",
"Operador de máquinas para o fabrico de produtos de papel",
"Operador de máquinas de tecer e tricotar",
"Operador de máquinas de costura",
"Operador de máquinas de branquear, tingir e limpar, tecidos e outros têxteis",
"Operador de máquinas de fabrico de calçado",
"Operador de máquinas de lavandaria",
"Operador de máquinas de preparação de carne e peixe",
"Operador de máquinas de moagem de cereais, de transformação de arroz e de fabricação de",
"Operador de máquinas de produção e refinação de açúcar",
"Operador de máquinas para preparação de chá, café e cacau",
"Operador de máquinas para preparação de vinhos e outras bebidas",
"Operador de máquinas para o fabrico do tabaco",
"Operador de instalações para o fabrico de pasta de papel e de papel",
"Operador de instalações para o trabalho da madeira e cortiça",
"Operador de instalações para o fabrico de vidro",
"Operador de máquinas a vapor e caldeiras",
"Operador de máquinas para fabrico de produtos de arame",
"Montador de maquinaria mecânica",
"Maquinista de locomotivas",
"Guarda freios, agulheiro e agente de manobras de caminhos de ferro",
"Condutor de motociclos",
"Motorista de táxis",
"Motorista de automóveis ligeiros e carrinhas",
"Motorista de autocarros",
"Motorista de veículos pesados de mercadorias",
"Operador de máquinas agrícolas e florestais, móveis",
"Operador de máquinas de escavação, terraplenagem",
"Operador de gruas, guindastes",
"Operador de instalações e máquinas, para filtragem e separação química",
"Tripulação de convés de navios",
"Trabalhador de limpeza em escritórios, hotéis",
"Lavadeiro e engomador de roupa",
"Lavador de veículos",
"Lavador de janelas",
"Trabalhador não qualificado da floresta",
"Trabalhador não qualificado das minas",
"Trabalhador não qualificado das pedreiras",
"Trabalhador não qualificado de engenharia civil",
"Trabalhador não qualificado da construção de edifícios",
"Embalador manual da indústria transformadora",
"Condutor de veículos acionados à mão ou ao pé",
"Condutor de veículos de tração animal",
"Carregadores e descarregadores não qualificados de mercadorias",
"Preparador de refeições rápidas",
"Ajudante de cozinha",
"Prestador de serviços na rua",
"Vendedor ambulante",
"Trabalhador da recolha de resíduos",
"Trabalhador da triagem de resíduos",
"Cantoneiro de limpeza",
"Empregado de lavabos",
"Estafeta",
"Bagageiro",
"Distribuidor de mercadorias",
"Colocador de anúncios",
"Leitor de contadores",
"Coletor de dinheiro em máquinas de venda automática",
"Coveiro",
]
| Provider |
python | pallets__werkzeug | src/werkzeug/datastructures/range.py | {
"start": 3936,
"end": 4707
} | class ____(t.Generic[T]):
def __set_name__(self, owner: type[ContentRange], name: str) -> None:
self.attr = f"_{name}"
@t.overload
def __get__(self, instance: None, owner: None) -> te.Self: ...
@t.overload
def __get__(self, instance: ContentRange, owner: type[ContentRange]) -> T: ...
def __get__(
self, instance: ContentRange | None, owner: type[ContentRange] | None
) -> te.Self | T:
if instance is None:
return self
return instance.__dict__[self.attr] # type: ignore[no-any-return]
def __set__(self, instance: ContentRange, value: T) -> None:
instance.__dict__[self.attr] = value
if instance.on_update is not None:
instance.on_update(instance)
| _CallbackProperty |
python | scipy__scipy | scipy/_lib/tests/test_config.py | {
"start": 230,
"end": 1244
} | class ____:
REQUIRED_CONFIG_KEYS = [
"Compilers",
"Machine Information",
"Python Information",
]
@patch("scipy.__config__._check_pyyaml")
def test_pyyaml_not_found(self, mock_yaml_importer):
mock_yaml_importer.side_effect = ModuleNotFoundError()
with pytest.warns(UserWarning):
scipy.show_config()
def test_dict_mode(self):
config = scipy.show_config(mode="dicts")
assert isinstance(config, dict)
assert all([key in config for key in self.REQUIRED_CONFIG_KEYS]), (
"Required key missing,"
" see index of `False` with `REQUIRED_CONFIG_KEYS`"
)
def test_invalid_mode(self):
with pytest.raises(AttributeError):
scipy.show_config(mode="foo")
def test_warn_to_add_tests(self):
assert len(scipy.__config__.DisplayModes) == 2, (
"New mode detected,"
" please add UT if applicable and increment this count"
)
| TestSciPyConfigs |
python | run-llama__llama_index | llama-index-core/llama_index/core/vector_stores/types.py | {
"start": 2264,
"end": 2481
} | class ____(str, Enum):
"""Vector store filter conditions to combine different filters."""
# TODO add more conditions
AND = "and"
OR = "or"
NOT = "not" # negates the filter condition
| FilterCondition |
python | keras-team__keras | keras/src/ops/nn.py | {
"start": 64832,
"end": 67068
} | class ____(Operation):
def __init__(self, axes, keepdims=False, synchronized=False, *, name=None):
super().__init__(name=name)
self.axes = axes
self.keepdims = keepdims
self.synchronized = synchronized
def call(self, x):
return backend.nn.moments(
x,
axes=self.axes,
keepdims=self.keepdims,
synchronized=self.synchronized,
)
def compute_output_spec(self, x):
return (
KerasTensor(
reduce_shape(x.shape, axis=self.axes, keepdims=self.keepdims),
dtype=x.dtype,
),
KerasTensor(
reduce_shape(x.shape, axis=self.axes, keepdims=self.keepdims),
dtype=x.dtype,
),
)
@keras_export(
[
"keras.ops.moments",
"keras.ops.nn.moments",
]
)
def moments(x, axes, keepdims=False, synchronized=False):
"""Calculates the mean and variance of `x`.
The mean and variance are calculated by aggregating the contents of `x`
across `axes`. If `x` is 1-D and `axes = [0]` this is just the mean and
variance of a vector.
Args:
x: Input tensor.
axes: A list of axes which to compute mean and variance.
keepdims: If this is set to `True`, the axes which are reduced are left
in the result as dimensions with size one.
synchronized: Only applicable with the TensorFlow backend.
If `True`, synchronizes the global batch statistics (mean and
variance) across all devices at each training step in a
distributed training strategy. If `False`, each replica uses its own
local batch statistics.
Returns:
A tuple containing two tensors - mean and variance.
Example:
>>> x = keras.ops.convert_to_tensor([0, 1, 2, 3, 100], dtype="float32")
>>> keras.ops.moments(x, axes=[0])
(array(21.2, dtype=float32), array(1553.3601, dtype=float32))
"""
if any_symbolic_tensors((x,)):
return Moments(axes, keepdims, synchronized=synchronized).symbolic_call(
x
)
return backend.nn.moments(x, axes, keepdims, synchronized=synchronized)
| Moments |
python | fsspec__filesystem_spec | fsspec/tests/test_spec.py | {
"start": 35424,
"end": 35626
} | class ____(DummyTestFS):
blocksize = 10
def _open(self, path, mode="rb", **kwargs):
stream = open(path, mode)
stream.size = os.stat(path).st_size
return stream
| DummyOpenFS |
python | ansible__ansible | test/units/module_utils/facts/test_ansible_collector.py | {
"start": 10395,
"end": 10644
} | class ____(collector.BaseFactCollector):
name = 'provides_something'
_fact_ids = set(['needed_fact'])
def collect(self, module=None, collected_facts=None):
return {'needed_fact': 'THE_NEEDED_FACT_VALUE'}
| ProvidesOtherFactCollector |
python | apache__airflow | providers/slack/tests/unit/slack/hooks/test_slack_webhook.py | {
"start": 6117,
"end": 7300
} | class ____:
@pytest.mark.asyncio
async def test_ok_response(self):
"""Test async decorator with OK response."""
@async_check_webhook_response
async def decorated():
return MOCK_WEBHOOK_RESPONSE
assert await decorated() is MOCK_WEBHOOK_RESPONSE
@pytest.mark.asyncio
@pytest.mark.parametrize(
("status_code", "body"),
[
(400, "invalid_payload"),
(403, "action_prohibited"),
(404, "channel_not_found"),
(410, "channel_is_archived"),
(500, "rollup_error"),
(418, "i_am_teapot"),
],
)
async def test_error_response(self, status_code, body):
"""Test async decorator with error response."""
test_response = WebhookResponse(url="foo://bar", status_code=status_code, body=body, headers={})
@async_check_webhook_response
async def decorated():
return test_response
error_message = rf"Response body: '{body}', Status Code: {status_code}\."
with pytest.raises(AirflowException, match=error_message):
await decorated()
| TestAsyncCheckWebhookResponseDecorator |
python | encode__django-rest-framework | tests/test_model_serializer.py | {
"start": 48393,
"end": 48883
} | class ____(TestCase):
def test_should_assert_if_writing_readonly_fields(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = OneFieldModel
fields = ('char_field',)
readonly_fields = fields
obj = OneFieldModel.objects.create(char_field='abc')
with pytest.raises(AssertionError) as cm:
TestSerializer(obj).fields
cm.match(r'readonly_fields')
| Issue4897TestCase |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/event_api.py | {
"start": 659,
"end": 745
} | class ____(Enum):
OFFSET = "OFFSET"
STORAGE_ID = "STORAGE_ID"
| EventLogCursorType |
python | cython__cython | Cython/Compiler/PyrexTypes.py | {
"start": 153221,
"end": 169750
} | class ____(CType):
# name string
# cname string
# scope CppClassScope
# templates [string] or None
is_cpp_class = 1
has_attributes = 1
exception_check = True
namespace = None
# For struct-like declaration.
kind = "struct"
packed = False
typedef_flag = False
subtypes = ['templates']
def __init__(self, name, scope, cname, base_classes, templates=None, template_type=None):
self.name = name
self.cname = cname
self.scope = scope
self.base_classes = base_classes
self.operators = []
self.templates = templates
self.template_type = template_type
self.num_optional_templates = sum(is_optional_template_param(T) for T in templates or ())
if templates:
self.specializations = {tuple(zip(templates, templates)): self}
else:
self.specializations = {}
self.is_cpp_string = cname in cpp_string_conversions
self.is_unowned_view = cname in cpp_unowned_views
def use_conversion_utility(self, from_or_to):
pass
def maybe_unordered(self):
if 'unordered' in self.cname:
return 'unordered_'
else:
return ''
def can_coerce_from_pyobject(self, env):
if self.cname in builtin_cpp_conversions:
template_count = builtin_cpp_conversions[self.cname]
for ix, T in enumerate(self.templates or []):
if ix >= template_count:
break
if T.is_pyobject or not T.can_coerce_from_pyobject(env):
return False
return True
elif self.cname in cpp_string_conversions:
return True
return False
def create_from_py_utility_code(self, env):
if self.from_py_function is not None:
return True
if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions:
X = "XYZABC"
tags = []
context = {}
for ix, T in enumerate(self.templates or []):
if ix >= builtin_cpp_conversions[self.cname]:
break
if T.is_pyobject or not T.create_from_py_utility_code(env):
return False
tags.append(T.specialization_name())
context[X[ix]] = T
if self.cname in cpp_string_conversions:
cls = 'string'
tags = type_identifier(self),
else:
cls = self.cname[5:]
cname = '__pyx_convert_%s_from_py_%s' % (cls, '__and_'.join(tags))
context.update({
'cname': cname,
'maybe_unordered': self.maybe_unordered(),
'type': self.cname,
})
# Override directives that should not be inherited from user code.
from .UtilityCode import CythonUtilityCode
directives = CythonUtilityCode.filter_inherited_directives(env.directives)
env.use_utility_code(CythonUtilityCode.load(
cls.replace('unordered_', '') + ".from_py", "CppConvert.pyx",
context=context, compiler_directives=directives))
self.from_py_function = cname
return True
def can_coerce_to_pyobject(self, env):
if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions:
for ix, T in enumerate(self.templates or []):
if ix >= builtin_cpp_conversions[self.cname]:
break
if T.is_pyobject or not T.can_coerce_to_pyobject(env):
return False
return True
def create_to_py_utility_code(self, env):
if self.to_py_function is not None:
return True
if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions:
X = "XYZABC"
tags = []
context = {}
for ix, T in enumerate(self.templates or []):
if ix >= builtin_cpp_conversions[self.cname]:
break
if not T.create_to_py_utility_code(env):
return False
tags.append(T.specialization_name())
context[X[ix]] = T
if self.cname in cpp_string_conversions:
cls = 'string'
prefix = 'PyObject_' # gets specialised by explicit type casts in CoerceToPyTypeNode
tags = type_identifier(self),
else:
cls = self.cname[5:]
prefix = ''
cname = "__pyx_convert_%s%s_to_py_%s" % (prefix, cls, "____".join(tags))
context.update({
'cname': cname,
'maybe_unordered': self.maybe_unordered(),
'type': self.cname,
})
from .UtilityCode import CythonUtilityCode
# Override directives that should not be inherited from user code.
directives = CythonUtilityCode.filter_inherited_directives(env.directives)
env.use_utility_code(CythonUtilityCode.load(
cls.replace('unordered_', '') + ".to_py", "CppConvert.pyx",
context=context, compiler_directives=directives))
self.to_py_function = cname
return True
def is_template_type(self):
return self.templates is not None and self.template_type is None
def get_fused_types(self, result=None, seen=None, include_function_return_type=False):
if result is None:
result = []
seen = set()
if self.namespace:
self.namespace.get_fused_types(result, seen)
if self.templates:
for T in self.templates:
T.get_fused_types(result, seen)
return result
def specialize_here(self, pos, env, template_values=None):
if not self.is_template_type():
error(pos, "'%s' type is not a template" % self)
return error_type
if len(self.templates) - self.num_optional_templates <= len(template_values) < len(self.templates):
num_defaults = len(self.templates) - len(template_values)
partial_specialization = self.declaration_code('', template_params=template_values)
# Most of the time we don't need to declare anything typed to these
# default template arguments, but when we do there's no way in C++
# to reference this directly. However, it is common convention to
# provide a typedef in the template class that resolves to each
# template type. For now, allow the user to specify this name as
# the template parameter.
# TODO: Allow typedefs in cpp classes and search for it in this
# classes scope as a concrete name we could use.
template_values = template_values + [
TemplatePlaceholderType(
"%s::%s" % (partial_specialization, param.name), True)
for param in self.templates[-num_defaults:]]
if len(self.templates) != len(template_values):
error(pos, "%s templated type receives %d arguments, got %d" %
(self.name, len(self.templates), len(template_values)))
return error_type
has_object_template_param = False
for value in template_values:
if value.is_pyobject or value.needs_refcounting:
has_object_template_param = True
type_description = "Python object" if value.is_pyobject else "Reference-counted"
error(pos,
"%s type '%s' cannot be used as a template argument" % (
type_description, value))
if has_object_template_param:
return error_type
return self.specialize(dict(zip(self.templates, template_values)))
def specialize(self, values):
if not self.templates and not self.namespace:
return self
if self.templates is None:
self.templates = []
key = tuple(values.items())
if key in self.specializations:
return self.specializations[key]
template_values = [t.specialize(values) for t in self.templates]
specialized = self.specializations[key] = \
CppClassType(self.name, None, self.cname, [], template_values, template_type=self)
# Need to do these *after* self.specializations[key] is set
# to avoid infinite recursion on circular references.
specialized.base_classes = [b.specialize(values) for b in self.base_classes]
if self.namespace is not None:
specialized.namespace = self.namespace.specialize(values)
specialized.scope = self.scope.specialize(values, specialized)
if self.cname == 'std::vector':
# vector<bool> is special cased in the C++ standard, and its
# accessors do not necessarily return references to the underlying
# elements (which may be bit-packed).
# http://www.cplusplus.com/reference/vector/vector-bool/
# Here we pretend that the various methods return bool values
# (as the actual returned values are coercible to such, and
# we don't support call expressions as lvalues).
T = values.get(self.templates[0], None)
if T and not T.is_fused and T.empty_declaration_code() == 'bool':
for bit_ref_returner in ('at', 'back', 'front'):
if bit_ref_returner in specialized.scope.entries:
specialized.scope.entries[bit_ref_returner].type.return_type = T
return specialized
def deduce_template_params(self, actual):
if actual.is_cv_qualified:
actual = actual.cv_base_type
if actual.is_reference:
actual = actual.ref_base_type
if self == actual:
return {}
elif actual.is_cpp_class:
self_template_type = self
while getattr(self_template_type, 'template_type', None):
self_template_type = self_template_type.template_type
def all_bases(cls):
yield cls
for parent in cls.base_classes:
yield from all_bases(parent)
for actual_base in all_bases(actual):
template_type = actual_base
while getattr(template_type, 'template_type', None):
template_type = template_type.template_type
if (self_template_type.empty_declaration_code()
== template_type.empty_declaration_code()):
return reduce(
merge_template_deductions,
[formal_param.deduce_template_params(actual_param)
for (formal_param, actual_param)
in zip(self.templates, actual_base.templates)],
{})
else:
return {}
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0,
template_params = None):
if template_params is None:
template_params = self.templates
if self.templates:
template_strings = [param.declaration_code('', for_display, None, pyrex)
for param in template_params
if not is_optional_template_param(param) and not param.is_fused]
if for_display:
brackets = "[%s]"
else:
brackets = "<%s> "
templates = brackets % ",".join(template_strings)
else:
templates = ""
if pyrex or for_display:
base_code = "%s%s" % (self.name, templates)
else:
base_code = "%s%s" % (self.cname, templates)
if self.namespace is not None:
base_code = "%s::%s" % (self.namespace.empty_declaration_code(), base_code)
base_code = public_decl(base_code, dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def cpp_optional_declaration_code(self, entity_code, dll_linkage=None, template_params=None):
return "__Pyx_Optional_Type<%s> %s" % (
self.declaration_code("", False, dll_linkage, False,
template_params),
entity_code)
def is_subclass(self, other_type):
if self.same_as_resolved_type(other_type):
return 1
for base_class in self.base_classes:
if base_class.is_subclass(other_type):
return 1
return 0
def subclass_dist(self, super_type):
if self.same_as_resolved_type(super_type):
return 0
elif not self.base_classes:
return float('inf')
else:
return 1 + min(b.subclass_dist(super_type) for b in self.base_classes)
def same_as_resolved_type(self, other_type):
if other_type.is_cpp_class:
if self == other_type:
return 1
# This messy logic is needed due to GH Issue #1852.
elif (self.cname == other_type.cname and
(self.template_type and other_type.template_type
or self.templates
or other_type.templates)):
if self.templates == other_type.templates:
return 1
for t1, t2 in zip(self.templates, other_type.templates):
if is_optional_template_param(t1) and is_optional_template_param(t2):
break
if not t1.same_as_resolved_type(t2):
return 0
return 1
return 0
def assignable_from_resolved_type(self, other_type):
# TODO: handle operator=(...) here?
if other_type is error_type:
return True
elif other_type.is_cpp_class:
return other_type.is_subclass(self)
elif other_type.is_string and self.cname in cpp_string_conversions:
return True
def attributes_known(self):
return self.scope is not None
def find_cpp_operation_type(self, operator, operand_type=None):
operands = [self]
if operand_type is not None:
operands.append(operand_type)
# pos == None => no errors
operator_entry = self.scope.lookup_operator_for_types(None, operator, operands)
if not operator_entry:
return None
func_type = operator_entry.type
if func_type.is_ptr:
func_type = func_type.base_type
return func_type.return_type
def get_constructor(self, pos):
constructor = self.scope.lookup('<init>')
if constructor is not None:
return constructor
# Otherwise: automatically declare no-args default constructor.
# Make it "nogil" if the base classes allow it.
nogil = True
for base in self.base_classes:
base_constructor = base.scope.lookup('<init>')
if base_constructor and not base_constructor.type.nogil:
nogil = False
break
func_type = CFuncType(self, [], exception_check='+', nogil=nogil)
return self.scope.declare_cfunction('<init>', func_type, pos)
def check_nullary_constructor(self, pos, msg="stack allocated"):
constructor = self.scope.lookup('<init>')
if constructor is not None and best_match([], constructor.all_alternatives()) is None:
error(pos, "C++ class must have a nullary constructor to be %s" % msg)
def cpp_optional_check_for_null_code(self, cname):
# only applies to c++ classes that are being declared as std::optional
return "(%s.has_value())" % cname
def needs_explicit_construction(self, scope):
return scope.is_c_class_scope
def needs_explicit_destruction(self, scope):
return self.needs_explicit_construction(scope) # same rules
def generate_explicit_destruction(self, code, entry, extra_access_code=""):
code.putln(f"__Pyx_call_destructor({extra_access_code}{entry.cname});")
def generate_explicit_construction(self, code, entry, extra_access_code=""):
code.put_cpp_placement_new(f"{extra_access_code}{entry.cname}")
| CppClassType |
python | celery__celery | celery/platforms.py | {
"start": 8818,
"end": 17822
} | class ____:
"""Context manager daemonizing the process."""
_is_open = False
def __init__(self, pidfile=None, workdir=None, umask=None,
fake=False, after_chdir=None, after_forkers=True,
**kwargs):
if isinstance(umask, str):
# octal or decimal, depending on initial zero.
umask = int(umask, 8 if umask.startswith('0') else 10)
self.workdir = workdir or DAEMON_WORKDIR
self.umask = umask
self.fake = fake
self.after_chdir = after_chdir
self.after_forkers = after_forkers
self.stdfds = (sys.stdin, sys.stdout, sys.stderr)
def redirect_to_null(self, fd):
if fd is not None:
dest = os.open(os.devnull, os.O_RDWR)
os.dup2(dest, fd)
def open(self):
if not self._is_open:
if not self.fake:
self._detach()
os.chdir(self.workdir)
if self.umask is not None:
os.umask(self.umask)
if self.after_chdir:
self.after_chdir()
if not self.fake:
# We need to keep /dev/urandom from closing because
# shelve needs it, and Beat needs shelve to start.
keep = list(self.stdfds) + fd_by_path(['/dev/urandom'])
close_open_fds(keep)
for fd in self.stdfds:
self.redirect_to_null(maybe_fileno(fd))
if self.after_forkers and mputil is not None:
mputil._run_after_forkers()
self._is_open = True
__enter__ = open
def close(self, *args):
if self._is_open:
self._is_open = False
__exit__ = close
def _detach(self):
if os.fork() == 0: # first child
os.setsid() # create new session
if os.fork() > 0: # pragma: no cover
# second child
os._exit(0)
else:
os._exit(0)
return self
def detached(logfile=None, pidfile=None, uid=None, gid=None, umask=0,
workdir=None, fake=False, **opts):
"""Detach the current process in the background (daemonize).
Arguments:
logfile (str): Optional log file.
The ability to write to this file
will be verified before the process is detached.
pidfile (str): Optional pid file.
The pidfile won't be created,
as this is the responsibility of the child. But the process will
exit if the pid lock exists and the pid written is still running.
uid (int, str): Optional user id or user name to change
effective privileges to.
gid (int, str): Optional group id or group name to change
effective privileges to.
umask (str, int): Optional umask that'll be effective in
the child process.
workdir (str): Optional new working directory.
fake (bool): Don't actually detach, intended for debugging purposes.
**opts (Any): Ignored.
Example:
>>> from celery.platforms import detached, create_pidlock
>>> with detached(
... logfile='/var/log/app.log',
... pidfile='/var/run/app.pid',
... uid='nobody'):
... # Now in detached child process with effective user set to nobody,
... # and we know that our logfile can be written to, and that
... # the pidfile isn't locked.
... pidlock = create_pidlock('/var/run/app.pid')
...
... # Run the program
... program.run(logfile='/var/log/app.log')
"""
if not resource:
raise RuntimeError('This platform does not support detach.')
workdir = os.getcwd() if workdir is None else workdir
signals.reset('SIGCLD') # Make sure SIGCLD is using the default handler.
maybe_drop_privileges(uid=uid, gid=gid)
def after_chdir_do():
# Since without stderr any errors will be silently suppressed,
# we need to know that we have access to the logfile.
logfile and open(logfile, 'a').close()
# Doesn't actually create the pidfile, but makes sure it's not stale.
if pidfile:
_create_pidlock(pidfile).release()
return DaemonContext(
umask=umask, workdir=workdir, fake=fake, after_chdir=after_chdir_do,
)
def parse_uid(uid):
"""Parse user id.
Arguments:
uid (str, int): Actual uid, or the username of a user.
Returns:
int: The actual uid.
"""
try:
return int(uid)
except ValueError:
try:
return pwd.getpwnam(uid).pw_uid
except (AttributeError, KeyError):
raise KeyError(f'User does not exist: {uid}')
def parse_gid(gid):
"""Parse group id.
Arguments:
gid (str, int): Actual gid, or the name of a group.
Returns:
int: The actual gid of the group.
"""
try:
return int(gid)
except ValueError:
try:
return grp.getgrnam(gid).gr_gid
except (AttributeError, KeyError):
raise KeyError(f'Group does not exist: {gid}')
def _setgroups_hack(groups):
# :fun:`setgroups` may have a platform-dependent limit,
# and it's not always possible to know in advance what this limit
# is, so we use this ugly hack stolen from glibc.
groups = groups[:]
while 1:
try:
return os.setgroups(groups)
except ValueError: # error from Python's check.
if len(groups) <= 1:
raise
groups[:] = groups[:-1]
except OSError as exc: # error from the OS.
if exc.errno != errno.EINVAL or len(groups) <= 1:
raise
groups[:] = groups[:-1]
def setgroups(groups):
"""Set active groups from a list of group ids."""
max_groups = None
try:
max_groups = os.sysconf('SC_NGROUPS_MAX')
except Exception: # pylint: disable=broad-except
pass
try:
return _setgroups_hack(groups[:max_groups])
except OSError as exc:
if exc.errno != errno.EPERM:
raise
if any(group not in groups for group in os.getgroups()):
# we shouldn't be allowed to change to this group.
raise
def initgroups(uid, gid):
"""Init process group permissions.
Compat version of :func:`os.initgroups` that was first
added to Python 2.7.
"""
if not pwd: # pragma: no cover
return
username = pwd.getpwuid(uid)[0]
if hasattr(os, 'initgroups'): # Python 2.7+
return os.initgroups(username, gid)
groups = [gr.gr_gid for gr in grp.getgrall()
if username in gr.gr_mem]
setgroups(groups)
def setgid(gid):
"""Version of :func:`os.setgid` supporting group names."""
os.setgid(parse_gid(gid))
def setuid(uid):
"""Version of :func:`os.setuid` supporting usernames."""
os.setuid(parse_uid(uid))
def maybe_drop_privileges(uid=None, gid=None):
"""Change process privileges to new user/group.
If UID and GID is specified, the real user/group is changed.
If only UID is specified, the real user is changed, and the group is
changed to the users primary group.
If only GID is specified, only the group is changed.
"""
if sys.platform == 'win32':
return
if os.geteuid():
# no point trying to setuid unless we're root.
if not os.getuid():
raise SecurityError('contact support')
uid = uid and parse_uid(uid)
gid = gid and parse_gid(gid)
if uid:
_setuid(uid, gid)
else:
gid and setgid(gid)
if uid and not os.getuid() and not os.geteuid():
raise SecurityError('Still root uid after drop privileges!')
if gid and not os.getgid() and not os.getegid():
raise SecurityError('Still root gid after drop privileges!')
def _setuid(uid, gid):
# If GID isn't defined, get the primary GID of the user.
if not gid and pwd:
gid = pwd.getpwuid(uid).pw_gid
# Must set the GID before initgroups(), as setgid()
# is known to zap the group list on some platforms.
# setgid must happen before setuid (otherwise the setgid operation
# may fail because of insufficient privileges and possibly stay
# in a privileged group).
setgid(gid)
initgroups(uid, gid)
# at last:
setuid(uid)
# ... and make sure privileges cannot be restored:
try:
setuid(0)
except OSError as exc:
if exc.errno != errno.EPERM:
raise
# we should get here: cannot restore privileges,
# everything was fine.
else:
raise SecurityError(
'non-root user able to restore privileges after setuid.')
if hasattr(_signal, 'setitimer'):
def _arm_alarm(seconds):
_signal.setitimer(_signal.ITIMER_REAL, seconds)
else:
def _arm_alarm(seconds):
_signal.alarm(math.ceil(seconds))
| DaemonContext |
python | doocs__leetcode | solution/1600-1699/1647.Minimum Deletions to Make Character Frequencies Unique/Solution2.py | {
"start": 0,
"end": 319
} | class ____:
def minDeletions(self, s: str) -> int:
cnt = Counter(s)
vals = sorted(cnt.values(), reverse=True)
ans = 0
for i in range(1, len(vals)):
while vals[i] >= vals[i - 1] and vals[i] > 0:
vals[i] -= 1
ans += 1
return ans
| Solution |
python | django__django | tests/admin_views/tests.py | {
"start": 145291,
"end": 148866
} | class ____(TestCase):
"""Tests for proxy models permissions in the admin."""
@classmethod
def setUpTestData(cls):
cls.viewuser = User.objects.create_user(
username="viewuser", password="secret", is_staff=True
)
cls.adduser = User.objects.create_user(
username="adduser", password="secret", is_staff=True
)
cls.changeuser = User.objects.create_user(
username="changeuser", password="secret", is_staff=True
)
cls.deleteuser = User.objects.create_user(
username="deleteuser", password="secret", is_staff=True
)
# Setup permissions.
opts = UserProxy._meta
cls.viewuser.user_permissions.add(
get_perm(UserProxy, get_permission_codename("view", opts))
)
cls.adduser.user_permissions.add(
get_perm(UserProxy, get_permission_codename("add", opts))
)
cls.changeuser.user_permissions.add(
get_perm(UserProxy, get_permission_codename("change", opts))
)
cls.deleteuser.user_permissions.add(
get_perm(UserProxy, get_permission_codename("delete", opts))
)
# UserProxy instances.
cls.user_proxy = UserProxy.objects.create(
username="user_proxy", password="secret"
)
def test_add(self):
self.client.force_login(self.adduser)
url = reverse("admin:admin_views_userproxy_add")
data = {
"username": "can_add",
"password": "secret",
"date_joined_0": "2019-01-15",
"date_joined_1": "16:59:10",
}
response = self.client.post(url, data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertTrue(UserProxy.objects.filter(username="can_add").exists())
def test_view(self):
self.client.force_login(self.viewuser)
response = self.client.get(reverse("admin:admin_views_userproxy_changelist"))
self.assertContains(response, "<h1>Select user proxy to view</h1>")
response = self.client.get(
reverse("admin:admin_views_userproxy_change", args=(self.user_proxy.pk,))
)
self.assertContains(response, "<h1>View user proxy</h1>")
self.assertContains(response, '<div class="readonly">user_proxy</div>')
def test_change(self):
self.client.force_login(self.changeuser)
data = {
"password": self.user_proxy.password,
"username": self.user_proxy.username,
"date_joined_0": self.user_proxy.date_joined.strftime("%Y-%m-%d"),
"date_joined_1": self.user_proxy.date_joined.strftime("%H:%M:%S"),
"first_name": "first_name",
}
url = reverse("admin:admin_views_userproxy_change", args=(self.user_proxy.pk,))
response = self.client.post(url, data)
self.assertRedirects(
response, reverse("admin:admin_views_userproxy_changelist")
)
self.assertEqual(
UserProxy.objects.get(pk=self.user_proxy.pk).first_name, "first_name"
)
def test_delete(self):
self.client.force_login(self.deleteuser)
url = reverse("admin:admin_views_userproxy_delete", args=(self.user_proxy.pk,))
response = self.client.post(url, {"post": "yes"}, follow=True)
self.assertEqual(response.status_code, 200)
self.assertFalse(UserProxy.objects.filter(pk=self.user_proxy.pk).exists())
@override_settings(ROOT_URLCONF="admin_views.urls")
| AdminViewProxyModelPermissionsTests |
python | apache__airflow | helm-tests/tests/helm_tests/other/test_limit_ranges.py | {
"start": 900,
"end": 1486
} | class ____:
"""Tests limit ranges."""
def test_limit_ranges_template(self):
docs = render_chart(
values={"limits": [{"max": {"cpu": "500m"}, "min": {"min": "200m"}, "type": "Container"}]},
show_only=["templates/limitrange.yaml"],
)
assert jmespath.search("kind", docs[0]) == "LimitRange"
assert jmespath.search("spec.limits[0].max.cpu", docs[0]) == "500m"
def test_limit_ranges_are_not_added_by_default(self):
docs = render_chart(show_only=["templates/limitrange.yaml"])
assert docs == []
| TestLimitRanges |
python | matplotlib__matplotlib | lib/matplotlib/text.py | {
"start": 53681,
"end": 59786
} | class ____:
def __init__(self,
xy,
xycoords='data',
annotation_clip=None):
x, y = xy # Make copy when xy is an array (and check the shape).
self.xy = x, y
self.xycoords = xycoords
self.set_annotation_clip(annotation_clip)
self._draggable = None
def _get_xy(self, renderer, xy, coords):
x, y = xy
xcoord, ycoord = coords if isinstance(coords, tuple) else (coords, coords)
if xcoord == 'data':
x = float(self.convert_xunits(x))
if ycoord == 'data':
y = float(self.convert_yunits(y))
return self._get_xy_transform(renderer, coords).transform((x, y))
def _get_xy_transform(self, renderer, coords):
if isinstance(coords, tuple):
xcoord, ycoord = coords
from matplotlib.transforms import blended_transform_factory
tr1 = self._get_xy_transform(renderer, xcoord)
tr2 = self._get_xy_transform(renderer, ycoord)
return blended_transform_factory(tr1, tr2)
elif callable(coords):
tr = coords(renderer)
if isinstance(tr, BboxBase):
return BboxTransformTo(tr)
elif isinstance(tr, Transform):
return tr
else:
raise TypeError(
f"xycoords callable must return a BboxBase or Transform, not a "
f"{type(tr).__name__}")
elif isinstance(coords, Artist):
bbox = coords.get_window_extent(renderer)
return BboxTransformTo(bbox)
elif isinstance(coords, BboxBase):
return BboxTransformTo(coords)
elif isinstance(coords, Transform):
return coords
elif not isinstance(coords, str):
raise TypeError(
f"'xycoords' must be an instance of str, tuple[str, str], Artist, "
f"Transform, or Callable, not a {type(coords).__name__}")
if coords == 'data':
return self.axes.transData
elif coords == 'polar':
from matplotlib.projections import PolarAxes
return PolarAxes.PolarTransform() + self.axes.transData
try:
bbox_name, unit = coords.split()
except ValueError: # i.e. len(coords.split()) != 2.
raise ValueError(f"{coords!r} is not a valid coordinate") from None
bbox0, xy0 = None, None
# if unit is offset-like
if bbox_name == "figure":
bbox0 = self.get_figure(root=False).figbbox
elif bbox_name == "subfigure":
bbox0 = self.get_figure(root=False).bbox
elif bbox_name == "axes":
bbox0 = self.axes.bbox
# reference x, y in display coordinate
if bbox0 is not None:
xy0 = bbox0.p0
elif bbox_name == "offset":
xy0 = self._get_position_xy(renderer)
else:
raise ValueError(f"{coords!r} is not a valid coordinate")
if unit == "points":
tr = Affine2D().scale(
self.get_figure(root=True).dpi / 72) # dpi/72 dots per point
elif unit == "pixels":
tr = Affine2D()
elif unit == "fontsize":
tr = Affine2D().scale(
self.get_size() * self.get_figure(root=True).dpi / 72)
elif unit == "fraction":
tr = Affine2D().scale(*bbox0.size)
else:
raise ValueError(f"{unit!r} is not a recognized unit")
return tr.translate(*xy0)
def set_annotation_clip(self, b):
"""
Set the annotation's clipping behavior.
Parameters
----------
b : bool or None
- True: The annotation will be clipped when ``self.xy`` is
outside the Axes.
- False: The annotation will always be drawn.
- None: The annotation will be clipped when ``self.xy`` is
outside the Axes and ``self.xycoords == "data"``.
"""
self._annotation_clip = b
def get_annotation_clip(self):
"""
Return the annotation's clipping behavior.
See `set_annotation_clip` for the meaning of return values.
"""
return self._annotation_clip
def _get_position_xy(self, renderer):
"""Return the pixel position of the annotated point."""
return self._get_xy(renderer, self.xy, self.xycoords)
def _check_xy(self, renderer=None):
"""Check whether the annotation at *xy_pixel* should be drawn."""
if renderer is None:
renderer = self.get_figure(root=True)._get_renderer()
b = self.get_annotation_clip()
if b or (b is None and self.xycoords == "data"):
# check if self.xy is inside the Axes.
xy_pixel = self._get_position_xy(renderer)
return self.axes.contains_point(xy_pixel)
return True
def draggable(self, state=None, use_blit=False):
"""
Set whether the annotation is draggable with the mouse.
Parameters
----------
state : bool or None
- True or False: set the draggability.
- None: toggle the draggability.
use_blit : bool, default: False
Use blitting for faster image composition. For details see
:ref:`func-animation`.
Returns
-------
DraggableAnnotation or None
If the annotation is draggable, the corresponding
`.DraggableAnnotation` helper is returned.
"""
from matplotlib.offsetbox import DraggableAnnotation
is_draggable = self._draggable is not None
# if state is None we'll toggle
if state is None:
state = not is_draggable
if state:
if self._draggable is None:
self._draggable = DraggableAnnotation(self, use_blit)
else:
if self._draggable is not None:
self._draggable.disconnect()
self._draggable = None
return self._draggable
| _AnnotationBase |
python | getsentry__sentry | src/sentry/services/nodestore/base.py | {
"start": 728,
"end": 10368
} | class ____(local, Service):
"""
Nodestore is a key-value store that is used to store event payloads. It comes in two flavors:
* Django backend, which is just KV-store implemented on top of postgres.
* Bigtable backend
Keys (ids) in nodestore are strings, and values (nodes) are
JSON-serializable objects. Nodestore additionally has the concept of
subkeys, which are additional JSON payloads that should be stored together
with the same "main" value. Internally those values are concatenated and
compressed as one bytestream which makes them compress very well. This:
>>> nodestore.set("key1", "my key")
>>> nodestore.set("key1.1", "my key 2")
>>> nodestore.get("key1")
"my key"
>>> nodestore.get("key1.1")
"my key 2"
...very likely takes more space than:
>>> nodestore.set_subkeys("key1", {None: "my key", "1": "my key 2"})
>>> nodestore.get("key1")
"my key"
>>> nodestore.get("key1", subkey="1")
"my key 2"
...simply because compressing "my key<SEPARATOR>my key 2" yields better
compression ratio than compressing each key individually.
This is used in reprocessing to store a snapshot of the event from multiple
stages of the pipeline.
"""
__all__ = (
"delete",
"delete_multi",
"get",
"get_bytes",
"get_multi",
"set",
"set_bytes",
"set_subkeys",
"cleanup",
"validate",
"bootstrap",
)
def delete(self, id: str) -> None:
"""
>>> nodestore.delete('key1')
"""
raise NotImplementedError
def delete_multi(self, id_list: list[str]) -> None:
"""
Delete multiple nodes.
Note: This is not guaranteed to be atomic and may result in a partial
delete.
>>> delete_multi(['key1', 'key2'])
"""
for id in id_list:
self.delete(id)
def _decode(self, value: None | bytes, subkey: str | None) -> Any | None:
if value is None:
return None
lines_iter = iter(value.splitlines())
try:
if subkey is not None:
# Those keys should be statically known identifiers in the app, such as
# "unprocessed_event". There is really no reason to allow anything but
# ASCII here.
_subkey = subkey.encode("ascii")
next(lines_iter)
for line in lines_iter:
if line.strip() == _subkey:
break
next(lines_iter)
return json_loads(next(lines_iter))
except StopIteration:
return None
def get_bytes(self, id: str) -> bytes | None:
"""
>>> nodestore._get_bytes('key1')
b'{"message": "hello world"}'
"""
return self._get_bytes(id)
def _get_bytes(self, id: str) -> bytes | None:
raise NotImplementedError
@metrics.wraps("nodestore.get.duration")
def get(self, id: str, subkey: str | None = None) -> Any:
"""
>>> nodestore.get('key1')
{"message": "hello world"}
"""
with sentry_sdk.start_span(op="nodestore.get") as span:
span.set_tag("node_id", id)
if subkey is None:
item_from_cache = self._get_cache_item(id)
if item_from_cache:
metrics.incr("nodestore.get", tags={"cache": "hit"})
span.set_tag("origin", "from_cache")
span.set_tag("found", bool(item_from_cache))
return item_from_cache
span.set_tag("subkey", str(subkey))
bytes_data = self._get_bytes(id)
rv = self._decode(bytes_data, subkey=subkey)
if subkey is None:
# set cache item only after we know decoding did not fail
self._set_cache_item(id, rv)
span.set_tag("result", "from_service")
if bytes_data:
span.set_tag("bytes.size", len(bytes_data))
span.set_tag("found", bool(rv))
metrics.incr("nodestore.get", tags={"cache": "miss", "found": bool(rv)})
return rv
def _get_bytes_multi(self, id_list: list[str]) -> dict[str, bytes | None]:
"""
>>> nodestore._get_bytes_multi(['key1', 'key2')
{
"key1": b'{"message": "hello world"}',
"key2": b'{"message": "hello world"}'
}
"""
return {id: self._get_bytes(id) for id in id_list}
def get_multi(self, id_list: list[str], subkey: str | None = None) -> dict[str, Any | None]:
"""
>>> nodestore.get_multi(['key1', 'key2')
{
"key1": {"message": "hello world"},
"key2": {"message": "hello world"}
}
"""
with sentry_sdk.start_span(op="nodestore.get_multi") as span:
# Deduplicate ids, preserving order
id_list = list(dict.fromkeys(id_list))
span.set_tag("subkey", str(subkey))
span.set_tag("num_ids", len(id_list))
if subkey is None:
cache_items = self._get_cache_items(id_list)
if len(cache_items) == len(id_list):
span.set_tag("result", "from_cache")
return cache_items
uncached_ids = [id for id in id_list if id not in cache_items]
else:
uncached_ids = id_list
with sentry_sdk.start_span(op="nodestore._get_bytes_multi_and_decode") as span:
items = {
id: self._decode(value, subkey=subkey)
for id, value in self._get_bytes_multi(uncached_ids).items()
}
if subkey is None:
self._set_cache_items(items)
items.update(cache_items)
span.set_tag("result", "from_service")
span.set_tag("found", len(items))
return items
def _encode(self, data: dict[str | None, Mapping[str, Any]]) -> bytes:
"""
Encode data dict in a way where its keys can be deserialized
independently. A `None` key must always be present which is served as
the "default" subkey (the regular event payload).
>>> _encode({"unprocessed": {}, None: {"stacktrace": {}}})
b'{"stacktrace": {}}\nunprocessed\n{}'
"""
lines = [json_dumps(data.pop(None)).encode("utf8")]
for key, value in data.items():
if key is not None:
lines.append(key.encode("ascii"))
lines.append(json_dumps(value).encode("utf8"))
return b"\n".join(lines)
def set_bytes(self, item_id: str, data: bytes, ttl: timedelta | None = None) -> None:
"""
>>> nodestore.set_bytes('key1', b"{'foo': 'bar'}")
"""
metrics.distribution("nodestore.set_bytes", len(data))
return self._set_bytes(item_id, data, ttl)
def _set_bytes(self, item_id: str, data: bytes, ttl: timedelta | None = None) -> None:
raise NotImplementedError
def set(self, item_id: str, data: Mapping[str, Any], ttl: timedelta | None = None) -> None:
"""
Set value for `item_id`. Note that this deletes existing subkeys for `item_id` as
well, use `set_subkeys` to write a value + subkeys.
>>> nodestore.set('key1', {'foo': 'bar'})
"""
return self.set_subkeys(item_id, {None: data}, ttl=ttl)
@sentry_sdk.tracing.trace
def set_subkeys(
self, item_id: str, data: dict[str | None, Mapping[str, Any]], ttl: timedelta | None = None
) -> None:
"""
Set value for `item_id` and its subkeys.
>>> nodestore.set_subkeys('key1', {
... None: {'foo': 'bar'},
... "reprocessing": {'foo': 'bam'},
... })
>>> nodestore.get('key1')
{'foo': 'bar'}
>>> nodestore.get('key1', subkey='reprocessing')
{'foo': 'bam'}
"""
cache_item = data.get(None)
bytes_data = self._encode(data)
self.set_bytes(item_id, bytes_data, ttl=ttl)
# set cache only after encoding and write to nodestore has succeeded
if options.get("nodestore.set-subkeys.enable-set-cache-item"):
self._set_cache_item(item_id, cache_item)
def cleanup(self, cutoff_timestamp: datetime) -> None:
raise NotImplementedError
def bootstrap(self) -> None:
raise NotImplementedError
def _get_cache_item(self, item_id: str) -> Any | None:
if self.cache:
return self.cache.get(item_id)
return None
@sentry_sdk.tracing.trace
def _get_cache_items(self, id_list: list[str]) -> dict[str, Any]:
if self.cache:
return self.cache.get_many(id_list)
return {}
def _set_cache_item(self, item_id: str, data: Any) -> None:
if self.cache and data:
self.cache.set(item_id, data)
@sentry_sdk.tracing.trace
def _set_cache_items(self, items: dict[Any, Any]) -> None:
if self.cache:
self.cache.set_many(items)
def _delete_cache_item(self, item_id: str) -> None:
if self.cache:
self.cache.delete(item_id)
def _delete_cache_items(self, id_list: list[str]) -> None:
if self.cache:
self.cache.delete_many([item_id for item_id in id_list])
@cached_property
def cache(self) -> BaseCache | None:
try:
return caches["nodedata"]
except InvalidCacheBackendError:
return None
| NodeStorage |
python | mlflow__mlflow | mlflow/store/analytics/trace_correlation.py | {
"start": 221,
"end": 804
} | class ____:
"""
Count statistics for trace correlation analysis.
This dataclass encapsulates the four fundamental counts needed
for correlation analysis between two trace filters.
Attributes:
total_count: Total number of traces in the experiment(s)
filter1_count: Number of traces matching filter 1
filter2_count: Number of traces matching filter 2
joint_count: Number of traces matching both filters
"""
total_count: int
filter1_count: int
filter2_count: int
joint_count: int
@dataclass
| TraceCorrelationCounts |
python | django__django | tests/managers_regress/tests.py | {
"start": 6906,
"end": 10947
} | class ____(SimpleTestCase):
def test_implicit_inheritance(self):
class CustomManager(models.Manager):
pass
class AbstractModel(models.Model):
custom_manager = CustomManager()
class Meta:
abstract = True
class PlainModel(models.Model):
custom_manager = CustomManager()
self.assertIsInstance(PlainModel._base_manager, models.Manager)
self.assertIsInstance(PlainModel._default_manager, CustomManager)
class ModelWithAbstractParent(AbstractModel):
pass
self.assertIsInstance(ModelWithAbstractParent._base_manager, models.Manager)
self.assertIsInstance(ModelWithAbstractParent._default_manager, CustomManager)
class ProxyModel(PlainModel):
class Meta:
proxy = True
self.assertIsInstance(ProxyModel._base_manager, models.Manager)
self.assertIsInstance(ProxyModel._default_manager, CustomManager)
class MTIModel(PlainModel):
pass
self.assertIsInstance(MTIModel._base_manager, models.Manager)
self.assertIsInstance(MTIModel._default_manager, CustomManager)
def test_default_manager_inheritance(self):
class CustomManager(models.Manager):
pass
class AbstractModel(models.Model):
another_manager = models.Manager()
custom_manager = CustomManager()
class Meta:
default_manager_name = "custom_manager"
abstract = True
class PlainModel(models.Model):
another_manager = models.Manager()
custom_manager = CustomManager()
class Meta:
default_manager_name = "custom_manager"
self.assertIsInstance(PlainModel._default_manager, CustomManager)
class ModelWithAbstractParent(AbstractModel):
pass
self.assertIsInstance(ModelWithAbstractParent._default_manager, CustomManager)
class ProxyModel(PlainModel):
class Meta:
proxy = True
self.assertIsInstance(ProxyModel._default_manager, CustomManager)
class MTIModel(PlainModel):
pass
self.assertIsInstance(MTIModel._default_manager, CustomManager)
def test_base_manager_inheritance(self):
class CustomManager(models.Manager):
pass
class AbstractModel(models.Model):
another_manager = models.Manager()
custom_manager = CustomManager()
class Meta:
base_manager_name = "custom_manager"
abstract = True
class PlainModel(models.Model):
another_manager = models.Manager()
custom_manager = CustomManager()
class Meta:
base_manager_name = "custom_manager"
self.assertIsInstance(PlainModel._base_manager, CustomManager)
class ModelWithAbstractParent(AbstractModel):
pass
self.assertIsInstance(ModelWithAbstractParent._base_manager, CustomManager)
class ProxyModel(PlainModel):
class Meta:
proxy = True
self.assertIsInstance(ProxyModel._base_manager, CustomManager)
class MTIModel(PlainModel):
pass
self.assertIsInstance(MTIModel._base_manager, CustomManager)
def test_manager_no_duplicates(self):
class CustomManager(models.Manager):
pass
class AbstractModel(models.Model):
custom_manager = models.Manager()
class Meta:
abstract = True
class TestModel(AbstractModel):
custom_manager = CustomManager()
self.assertEqual(TestModel._meta.managers, (TestModel.custom_manager,))
self.assertEqual(
TestModel._meta.managers_map, {"custom_manager": TestModel.custom_manager}
)
def test_manager_class_getitem(self):
self.assertIs(models.Manager[Child1], models.Manager)
| TestManagerInheritance |
python | jazzband__django-formtools | tests/wizard/test_forms.py | {
"start": 2405,
"end": 2561
} | class ____(TestWizard):
form_list = [Step1]
def get_form_list(self):
return {'start': Step1, 'step2': Step2}
| TestWizardWithCustomGetFormList |
python | google__python-fire | fire/console/console_attr.py | {
"start": 3024,
"end": 3238
} | class ____(object):
"""Box/line drawing characters.
The element names are from ISO 8879:1986//ENTITIES Box and Line Drawing//EN:
http://www.w3.org/2003/entities/iso8879doc/isobox.html
"""
| BoxLineCharacters |
python | FactoryBoy__factory_boy | factory/declarations.py | {
"start": 9820,
"end": 11116
} | class ____(BaseDeclaration):
"""Variant of LazyAttribute, also receives the containers of the object.
Attributes:
function (function): A function, expecting the current LazyStub and the
(optional) object having a subfactory containing this attribute.
strict (bool): Whether evaluating should fail when the containers are
not passed in (i.e used outside a SubFactory).
"""
def __init__(self, function, strict=True):
super().__init__()
self.function = function
self.strict = strict
def evaluate(self, instance, step, extra):
"""Evaluate the current ContainerAttribute.
Args:
obj (LazyStub): a lazy stub of the object being constructed, if
needed.
containers (list of LazyStub): a list of lazy stubs of factories
being evaluated in a chain, each item being a future field of
next one.
"""
# Strip the current instance from the chain
chain = step.chain[1:]
if self.strict and not chain:
raise TypeError(
"A ContainerAttribute in 'strict' mode can only be used "
"within a SubFactory.")
return self.function(instance, chain)
| ContainerAttribute |
python | tiangolo__fastapi | tests/test_response_model_include_exclude.py | {
"start": 103,
"end": 156
} | class ____(BaseModel):
foo: str
bar: str
| Model1 |
python | doocs__leetcode | solution/0300-0399/0304.Range Sum Query 2D - Immutable/Solution.py | {
"start": 0,
"end": 769
} | class ____:
def __init__(self, matrix: List[List[int]]):
m, n = len(matrix), len(matrix[0])
self.s = [[0] * (n + 1) for _ in range(m + 1)]
for i, row in enumerate(matrix):
for j, v in enumerate(row):
self.s[i + 1][j + 1] = (
self.s[i][j + 1] + self.s[i + 1][j] - self.s[i][j] + v
)
def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
return (
self.s[row2 + 1][col2 + 1]
- self.s[row2 + 1][col1]
- self.s[row1][col2 + 1]
+ self.s[row1][col1]
)
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2)
| NumMatrix |
python | PrefectHQ__prefect | src/prefect/server/orchestration/rules.py | {
"start": 44504,
"end": 44618
} | class ____(
BaseUniversalTransform[orm_models.FlowRun, core.FlowRunPolicy]
):
pass
| FlowRunUniversalTransform |
python | scipy__scipy | scipy/spatial/tests/test_kdtree.py | {
"start": 11936,
"end": 12368
} | class ____(ball_consistency):
# allow some roundoff errors due to numerical issues
tol = 1e-13
def setup_method(self):
n = 1000
m = 2
np.random.seed(123)
self.data = np.random.randint(100, 1000, size=(n, m))
self.T = self.kdtree_type(self.data)
self.x = self.data
self.p = 100
self.eps = 0
self.d = 10
@KDTreeTest
| _Test_random_ball_largep_issue9890 |
python | realpython__materials | python-self-type/accounts.py | {
"start": 85,
"end": 618
} | class ____:
account_number: int
balance: float
def display_balance(self) -> Self:
print(f"Account Number: {self.account_number}")
print(f"Balance: ${self.balance:,.2f}\n")
return self
def deposit(self, amount: float) -> Self:
self.balance += amount
return self
def withdraw(self, amount: float) -> Self:
if self.balance >= amount:
self.balance -= amount
else:
print("Insufficient balance")
return self
@dataclass
| BankAccount |
python | scipy__scipy | scipy/optimize/tests/test_linprog.py | {
"start": 93907,
"end": 94969
} | class ____(LinprogHiGHSTests):
method = "highs-ds"
options = {}
def test_lad_regression(self):
'''
The scaled model should be optimal, i.e. not produce unscaled model
infeasible. See https://github.com/ERGO-Code/HiGHS/issues/494.
'''
# Test to ensure gh-13610 is resolved (mismatch between HiGHS scaled
# and unscaled model statuses)
c, A_ub, b_ub, bnds = l1_regression_prob()
res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=bnds,
method=self.method, options=self.options)
assert_equal(res.status, 0)
assert_(res.x is not None)
assert_(np.all(res.slack > -1e-6))
assert_(np.all(res.x <= [np.inf if ub is None else ub
for lb, ub in bnds]))
assert_(np.all(res.x >= [-np.inf if lb is None else lb - 1e-7
for lb, ub in bnds]))
###################################
# HiGHS-IPM Option-Specific Tests #
###################################
| TestLinprogHiGHSSimplexDual |
python | scikit-learn__scikit-learn | sklearn/tests/test_pipeline.py | {
"start": 2866,
"end": 2994
} | class ____(NoInvTransf):
def transform(self, X):
return X
def inverse_transform(self, X):
return X
| Transf |
python | facebookresearch__faiss | tests/test_index.py | {
"start": 734,
"end": 3379
} | class ____(unittest.TestCase):
def do_test(self, nq, metric_type=faiss.METRIC_L2, k=10):
d = 32
nb = 1000
nt = 0
(xt, xb, xq) = get_dataset_2(d, nt, nb, nq)
index = faiss.IndexFlat(d, metric_type)
### k-NN search
index.add(xb)
D1, I1 = index.search(xq, k)
if metric_type == faiss.METRIC_L2:
all_dis = ((xq.reshape(nq, 1, d) - xb.reshape(1, nb, d)) ** 2).sum(2)
Iref = all_dis.argsort(axis=1)[:, :k]
else:
all_dis = np.dot(xq, xb.T)
Iref = all_dis.argsort(axis=1)[:, ::-1][:, :k]
Dref = all_dis[np.arange(nq)[:, None], Iref]
# not too many elements are off.
self.assertLessEqual((Iref != I1).sum(), Iref.size * 0.0002)
# np.testing.assert_equal(Iref, I1)
np.testing.assert_almost_equal(Dref, D1, decimal=5)
### Range search
radius = float(np.median(Dref[:, -1]))
lims, D2, I2 = index.range_search(xq, radius)
for i in range(nq):
l0, l1 = lims[i:i + 2]
_, Il = D2[l0:l1], I2[l0:l1]
if metric_type == faiss.METRIC_L2:
Ilref, = np.where(all_dis[i] < radius)
else:
Ilref, = np.where(all_dis[i] > radius)
Il.sort()
Ilref.sort()
np.testing.assert_equal(Il, Ilref)
np.testing.assert_almost_equal(
all_dis[i, Ilref], D2[l0:l1],
decimal=5
)
def set_blas_blocks(self, small):
if small:
faiss.cvar.distance_compute_blas_query_bs = 16
faiss.cvar.distance_compute_blas_database_bs = 12
else:
faiss.cvar.distance_compute_blas_query_bs = 4096
faiss.cvar.distance_compute_blas_database_bs = 1024
def test_with_blas(self):
self.set_blas_blocks(small=True)
self.do_test(200)
self.set_blas_blocks(small=False)
def test_noblas(self):
self.do_test(10)
def test_with_blas_ip(self):
self.set_blas_blocks(small=True)
self.do_test(200, faiss.METRIC_INNER_PRODUCT)
self.set_blas_blocks(small=False)
def test_noblas_ip(self):
self.do_test(10, faiss.METRIC_INNER_PRODUCT)
def test_noblas_reservoir(self):
self.do_test(10, k=150)
def test_with_blas_reservoir(self):
self.do_test(200, k=150)
def test_noblas_reservoir_ip(self):
self.do_test(10, faiss.METRIC_INNER_PRODUCT, k=150)
def test_with_blas_reservoir_ip(self):
self.do_test(200, faiss.METRIC_INNER_PRODUCT, k=150)
| TestIndexFlat |
python | doocs__leetcode | solution/0300-0399/0307.Range Sum Query - Mutable/Solution2.py | {
"start": 1353,
"end": 1831
} | class ____:
__slots__ = ["tree"]
def __init__(self, nums: List[int]):
self.tree = SegmentTree(nums)
def update(self, index: int, val: int) -> None:
self.tree.modify(1, index + 1, val)
def sumRange(self, left: int, right: int) -> int:
return self.tree.query(1, left + 1, right + 1)
# Your NumArray object will be instantiated and called as such:
# obj = NumArray(nums)
# obj.update(index,val)
# param_2 = obj.sumRange(left,right)
| NumArray |
python | lxml__lxml | src/lxml/tests/test_elementtree.py | {
"start": 1478,
"end": 131335
} | class ____(helper_base):
etree = None
required_versions_ET = {}
def XMLParser(self, **kwargs):
try:
XMLParser = self.etree.XMLParser
except AttributeError:
assert 'ElementTree' in self.etree.__name__
XMLParser = self.etree.TreeBuilder
return XMLParser(**kwargs)
@et_needs_pyversion(3, 6)
def test_interface(self):
# Test element tree interface.
def check_string(string):
len(string)
for char in string:
self.assertEqual(len(char), 1,
msg="expected one-character string, got %r" % char)
new_string = string + ""
new_string = string + " "
string[:0]
def check_mapping(mapping):
len(mapping)
keys = mapping.keys()
items = mapping.items()
for key in keys:
item = mapping[key]
mapping["key"] = "value"
self.assertEqual(mapping["key"], "value",
msg="expected value string, got %r" % mapping["key"])
def check_element(element):
self.assertTrue(self.etree.iselement(element), msg="not an element")
direlem = dir(element)
for attr in 'tag', 'attrib', 'text', 'tail':
self.assertTrue(hasattr(element, attr),
msg='no %s member' % attr)
self.assertIn(attr, direlem,
msg='no %s visible by dir' % attr)
check_string(element.tag)
check_mapping(element.attrib)
if element.text is not None:
check_string(element.text)
if element.tail is not None:
check_string(element.tail)
for elem in element:
check_element(elem)
element = self.etree.Element("tag")
check_element(element)
tree = self.etree.ElementTree(element)
check_element(tree.getroot())
element = self.etree.Element("t\xe4g", key="value")
tree = self.etree.ElementTree(element)
# lxml and ET Py2: slightly different repr()
#self.assertRegex(repr(element), r"^<Element 't\xe4g' at 0x.*>$")
element = self.etree.Element("tag", key="value")
# Make sure all standard element methods exist.
def check_method(method):
self.assertTrue(hasattr(method, '__call__'),
msg="%s not callable" % method)
check_method(element.append)
check_method(element.extend)
check_method(element.insert)
check_method(element.remove)
# Removed in Py3.9
#check_method(element.getchildren)
check_method(element.find)
check_method(element.iterfind)
check_method(element.findall)
check_method(element.findtext)
check_method(element.clear)
check_method(element.get)
check_method(element.set)
check_method(element.keys)
check_method(element.items)
check_method(element.iter)
check_method(element.itertext)
# Removed in Py3.9
#check_method(element.getiterator)
# These methods return an iterable. See bug 6472.
def check_iter(it):
check_method(it.__next__)
check_iter(element.iterfind("tag"))
check_iter(element.iterfind("*"))
check_iter(tree.iterfind("tag"))
check_iter(tree.iterfind("*"))
# These aliases are provided:
# not an alias in lxml
#self.assertEqual(self.etree.XML, self.etree.fromstring)
self.assertEqual(self.etree.PI, self.etree.ProcessingInstruction)
def test_element(self):
for i in range(10):
e = self.etree.Element('foo')
self.assertEqual(e.tag, 'foo')
self.assertEqual(e.text, None)
self.assertEqual(e.tail, None)
def test_simple(self):
Element = self.etree.Element
root = Element('root')
root.append(Element('one'))
root.append(Element('two'))
root.append(Element('three'))
self.assertEqual(3, len(root))
self.assertEqual('one', root[0].tag)
self.assertEqual('two', root[1].tag)
self.assertEqual('three', root[2].tag)
self.assertRaises(IndexError, operator.getitem, root, 3)
# test weird dictionary interaction leading to segfault previously
def test_weird_dict_interaction(self):
root = self.etree.Element('root')
self.assertEqual(root.tag, "root")
add = self.etree.ElementTree(file=BytesIO(b'<foo>Foo</foo>'))
self.assertEqual(add.getroot().tag, "foo")
self.assertEqual(add.getroot().text, "Foo")
root.append(self.etree.Element('baz'))
self.assertEqual(root.tag, "root")
self.assertEqual(root[0].tag, "baz")
def test_subelement(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
SubElement(root, 'one')
SubElement(root, 'two')
SubElement(root, 'three')
self.assertEqual(3, len(root))
self.assertEqual('one', root[0].tag)
self.assertEqual('two', root[1].tag)
self.assertEqual('three', root[2].tag)
def test_element_contains(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root1 = Element('root')
SubElement(root1, 'one')
self.assertTrue(root1[0] in root1)
root2 = Element('root')
SubElement(root2, 'two')
SubElement(root2, 'three')
self.assertTrue(root2[0] in root2)
self.assertTrue(root2[1] in root2)
self.assertFalse(root1[0] in root2)
self.assertFalse(root2[0] in root1)
self.assertFalse(None in root2)
def test_element_indexing_with_text(self):
ElementTree = self.etree.ElementTree
f = BytesIO(b'<doc>Test<one>One</one></doc>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual(1, len(root))
self.assertEqual('one', root[0].tag)
self.assertRaises(IndexError, operator.getitem, root, 1)
def test_element_indexing_with_text2(self):
ElementTree = self.etree.ElementTree
f = BytesIO(b'<doc><one>One</one><two>Two</two>hm<three>Three</three></doc>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual(3, len(root))
self.assertEqual('one', root[0].tag)
self.assertEqual('two', root[1].tag)
self.assertEqual('three', root[2].tag)
def test_element_indexing_only_text(self):
ElementTree = self.etree.ElementTree
f = BytesIO(b'<doc>Test</doc>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual(0, len(root))
def test_element_indexing_negative(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
self.assertEqual(d, a[-1])
self.assertEqual(c, a[-2])
self.assertEqual(b, a[-3])
self.assertRaises(IndexError, operator.getitem, a, -4)
a[-1] = e = Element('e')
self.assertEqual(e, a[-1])
del a[-1]
self.assertEqual(2, len(a))
def test_elementtree(self):
ElementTree = self.etree.ElementTree
f = BytesIO(b'<doc><one>One</one><two>Two</two></doc>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual(2, len(root))
self.assertEqual('one', root[0].tag)
self.assertEqual('two', root[1].tag)
def test_text(self):
ElementTree = self.etree.ElementTree
f = BytesIO(b'<doc>This is a text</doc>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual('This is a text', root.text)
def test_text_empty(self):
ElementTree = self.etree.ElementTree
f = BytesIO(b'<doc></doc>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual(None, root.text)
def test_text_other(self):
ElementTree = self.etree.ElementTree
f = BytesIO(b'<doc><one>One</one></doc>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual(None, root.text)
self.assertEqual('One', root[0].text)
def test_text_escape_in(self):
ElementTree = self.etree.ElementTree
f = BytesIO(b'<doc>This is > than a text</doc>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual('This is > than a text', root.text)
def test_text_escape_out(self):
Element = self.etree.Element
a = Element("a")
a.text = "<>&"
self.assertXML(b'<a><>&</a>',
a)
def test_text_escape_tostring(self):
tostring = self.etree.tostring
Element = self.etree.Element
a = Element("a")
a.text = "<>&"
self.assertEqual(b'<a><>&</a>',
tostring(a))
def test_text_str_subclass(self):
Element = self.etree.Element
class strTest(str):
pass
a = Element("a")
a.text = strTest("text")
self.assertXML(b'<a>text</a>',
a)
def test_tail(self):
ElementTree = self.etree.ElementTree
f = BytesIO(b'<doc>This is <i>mixed</i> content.</doc>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual(1, len(root))
self.assertEqual('This is ', root.text)
self.assertEqual(None, root.tail)
self.assertEqual('mixed', root[0].text)
self.assertEqual(' content.', root[0].tail)
def test_tail_str_subclass(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
class strTest(str):
pass
a = Element("a")
SubElement(a, "t").tail = strTest("tail")
self.assertXML(b'<a><t></t>tail</a>',
a)
def _test_del_tail(self):
# this is discouraged for ET compat, should not be tested...
XML = self.etree.XML
root = XML(b'<doc>This is <i>mixed</i> content.</doc>')
self.assertEqual(1, len(root))
self.assertEqual('This is ', root.text)
self.assertEqual(None, root.tail)
self.assertEqual('mixed', root[0].text)
self.assertEqual(' content.', root[0].tail)
del root[0].tail
self.assertEqual(1, len(root))
self.assertEqual('This is ', root.text)
self.assertEqual(None, root.tail)
self.assertEqual('mixed', root[0].text)
self.assertEqual(None, root[0].tail)
root[0].tail = "TAIL"
self.assertEqual(1, len(root))
self.assertEqual('This is ', root.text)
self.assertEqual(None, root.tail)
self.assertEqual('mixed', root[0].text)
self.assertEqual('TAIL', root[0].tail)
def test_ElementTree(self):
Element = self.etree.Element
ElementTree = self.etree.ElementTree
el = Element('hoi')
doc = ElementTree(el)
root = doc.getroot()
self.assertEqual(None, root.text)
self.assertEqual('hoi', root.tag)
def test_attrib(self):
ElementTree = self.etree.ElementTree
f = BytesIO(b'<doc one="One" two="Two"/>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual('One', root.attrib['one'])
self.assertEqual('Two', root.attrib['two'])
self.assertRaises(KeyError, operator.getitem, root.attrib, 'three')
def test_attrib_get(self):
ElementTree = self.etree.ElementTree
f = BytesIO(b'<doc one="One" two="Two"/>')
doc = ElementTree(file=f)
root = doc.getroot()
self.assertEqual('One', root.attrib.get('one'))
self.assertEqual('Two', root.attrib.get('two'))
self.assertEqual(None, root.attrib.get('three'))
self.assertEqual('foo', root.attrib.get('three', 'foo'))
def test_attrib_dict(self):
ElementTree = self.etree.ElementTree
f = BytesIO(b'<doc one="One" two="Two"/>')
doc = ElementTree(file=f)
root = doc.getroot()
attrib = dict(root.attrib)
self.assertEqual('One', attrib['one'])
self.assertEqual('Two', attrib['two'])
self.assertRaises(KeyError, operator.getitem, attrib, 'three')
def test_attrib_copy(self):
    """copy.copy() of .attrib yields an independent mapping of the attributes."""
    stream = BytesIO(b'<doc one="One" two="Two"/>')
    node = self.etree.ElementTree(file=stream).getroot()
    shallow = copy.copy(node.attrib)
    self.assertEqual('One', shallow['one'])
    self.assertEqual('Two', shallow['two'])
    self.assertRaises(KeyError, operator.getitem, shallow, 'three')
def test_attrib_deepcopy(self):
    """copy.deepcopy() of .attrib also yields an independent mapping."""
    stream = BytesIO(b'<doc one="One" two="Two"/>')
    node = self.etree.ElementTree(file=stream).getroot()
    deep = copy.deepcopy(node.attrib)
    self.assertEqual('One', deep['one'])
    self.assertEqual('Two', deep['two'])
    self.assertRaises(KeyError, operator.getitem, deep, 'three')
def test_attributes_get(self):
    """Element.get mirrors attrib.get, including the default argument."""
    stream = BytesIO(b'<doc one="One" two="Two"/>')
    node = self.etree.ElementTree(file=stream).getroot()
    self.assertEqual('One', node.get('one'))
    self.assertEqual('Two', node.get('two'))
    self.assertEqual(None, node.get('three'))
    self.assertEqual('foo', node.get('three', 'foo'))
def test_attrib_clear(self):
    """attrib.clear() removes all attributes parsed from the document."""
    node = self.etree.XML(b'<doc one="One" two="Two"/>')
    self.assertEqual('One', node.get('one'))
    self.assertEqual('Two', node.get('two'))
    node.attrib.clear()
    self.assertEqual(None, node.get('one'))
    self.assertEqual(None, node.get('two'))
def test_attrib_set_clear(self):
    """attrib.clear() also removes attributes added via keyword/.set()."""
    node = self.etree.Element("root", one="One")
    node.set("two", "Two")
    self.assertEqual('One', node.get('one'))
    self.assertEqual('Two', node.get('two'))
    node.attrib.clear()
    self.assertEqual(None, node.get('one'))
    self.assertEqual(None, node.get('two'))
def test_attrib_ns_clear(self):
    """clear() removes an element's own namespaced attributes.

    Clearing the parent must not touch attributes set on its children.
    """
    Element = self.etree.Element
    SubElement = self.etree.SubElement
    attribNS = '{http://foo/bar}x'  # fully qualified (Clark notation) name
    parent = Element('parent')
    parent.set(attribNS, 'a')
    child = SubElement(parent, 'child')
    child.set(attribNS, 'b')
    self.assertEqual('a', parent.get(attribNS))
    self.assertEqual('b', child.get(attribNS))
    parent.clear()
    # parent lost its attribute, the child keeps its own
    self.assertEqual(None, parent.get(attribNS))
    self.assertEqual('b', child.get(attribNS))
def test_attrib_pop(self):
    """attrib.pop removes and returns an existing attribute."""
    stream = BytesIO(b'<doc one="One" two="Two"/>')
    node = self.etree.ElementTree(file=stream).getroot()
    self.assertEqual('One', node.attrib['one'])
    self.assertEqual('Two', node.attrib['two'])
    self.assertEqual('One', node.attrib.pop('one'))
    self.assertEqual(None, node.attrib.get('one'))
    self.assertEqual('Two', node.attrib['two'])
def test_attrib_pop_unknown(self):
    """Popping a missing key without default raises KeyError, leaving attrib intact."""
    node = self.etree.XML(b'<doc one="One" two="Two"/>')
    self.assertRaises(KeyError, node.attrib.pop, 'NONE')
    self.assertEqual('One', node.attrib['one'])
    self.assertEqual('Two', node.attrib['two'])
def test_attrib_pop_default(self):
    """pop() falls back to the provided default for a missing key."""
    node = self.etree.XML(b'<doc one="One" two="Two"/>')
    self.assertEqual('Three', node.attrib.pop('three', 'Three'))
def test_attrib_pop_empty_default(self):
    """pop() with a default also works when the element has no attributes."""
    node = self.etree.XML(b'<doc/>')
    self.assertEqual('Three', node.attrib.pop('three', 'Three'))
def test_attrib_pop_invalid_args(self):
    """pop() rejects more than one default argument with TypeError."""
    node = self.etree.XML(b'<doc one="One" two="Two"/>')
    self.assertRaises(TypeError, node.attrib.pop, 'One', None, None)
def test_attribute_update_dict(self):
    """attrib.update(dict) overwrites existing keys and adds new ones."""
    node = self.etree.XML(b'<doc alpha="Alpha" beta="Beta"/>')
    self.assertEqual(
        [('alpha', 'Alpha'), ('beta', 'Beta')],
        sorted(node.attrib.items()))
    node.attrib.update({'alpha' : 'test', 'gamma' : 'Gamma'})
    self.assertEqual(
        [('alpha', 'test'), ('beta', 'Beta'), ('gamma', 'Gamma')],
        sorted(node.attrib.items()))
def test_attribute_update_sequence(self):
XML = self.etree.XML
root = XML(b'<doc alpha="Alpha" beta="Beta"/>')
items = list(root.attrib.items())
items.sort()
self.assertEqual(
[('alpha', 'Alpha'), ('beta', 'Beta')],
items)
root.attrib.update({'alpha' : 'test', 'gamma' : 'Gamma'}.items())
items = list(root.attrib.items())
items.sort()
self.assertEqual(
[('alpha', 'test'), ('beta', 'Beta'), ('gamma', 'Gamma')],
items)
def test_attribute_update_iter(self):
XML = self.etree.XML
root = XML(b'<doc alpha="Alpha" beta="Beta"/>')
items = list(root.attrib.items())
items.sort()
self.assertEqual(
[('alpha', 'Alpha'), ('beta', 'Beta')],
items)
root.attrib.update(iter({'alpha' : 'test', 'gamma' : 'Gamma'}.items()))
items = list(root.attrib.items())
items.sort()
self.assertEqual(
[('alpha', 'test'), ('beta', 'Beta'), ('gamma', 'Gamma')],
items)
def test_attribute_update_attrib(self):
XML = self.etree.XML
root = XML(b'<doc alpha="Alpha" beta="Beta"/>')
items = list(root.attrib.items())
items.sort()
self.assertEqual(
[('alpha', 'Alpha'), ('beta', 'Beta')],
items)
other = XML(b'<doc alpha="test" gamma="Gamma"/>')
root.attrib.update(other.attrib)
items = list(root.attrib.items())
items.sort()
self.assertEqual(
[('alpha', 'test'), ('beta', 'Beta'), ('gamma', 'Gamma')],
items)
def test_attribute_keys(self):
    """attrib.keys() covers every attribute name on the element."""
    node = self.etree.XML(b'<doc alpha="Alpha" beta="Beta" gamma="Gamma"/>')
    self.assertEqual(['alpha', 'beta', 'gamma'], sorted(node.attrib.keys()))
def test_attribute_keys2(self):
XML = self.etree.XML
root = XML(b'<doc alpha="Alpha" beta="Beta" gamma="Gamma"/>')
keys = list(root.keys())
keys.sort()
self.assertEqual(['alpha', 'beta', 'gamma'], keys)
def test_attribute_items2(self):
XML = self.etree.XML
root = XML(b'<doc alpha="Alpha" beta="Beta" gamma="Gamma"/>')
items = list(root.items())
items.sort()
self.assertEqual(
[('alpha','Alpha'), ('beta','Beta'), ('gamma','Gamma')],
items)
def test_attribute_keys_ns(self):
XML = self.etree.XML
root = XML(b'<foo bar="Bar" xmlns:ns="http://ns.codespeak.net/test" ns:baz="Baz" />')
keys = list(root.keys())
keys.sort()
self.assertEqual(['bar', '{http://ns.codespeak.net/test}baz'],
keys)
def test_attribute_values(self):
XML = self.etree.XML
root = XML(b'<doc alpha="Alpha" beta="Beta" gamma="Gamma"/>')
values = list(root.attrib.values())
values.sort()
self.assertEqual(['Alpha', 'Beta', 'Gamma'], values)
def test_attribute_values_ns(self):
XML = self.etree.XML
root = XML(b'<foo bar="Bar" xmlns:ns="http://ns.codespeak.net/test" ns:baz="Baz" />')
values = list(root.attrib.values())
values.sort()
self.assertEqual(
['Bar', 'Baz'], values)
def test_attribute_items(self):
XML = self.etree.XML
root = XML(b'<doc alpha="Alpha" beta="Beta" gamma="Gamma"/>')
items = list(root.attrib.items())
items.sort()
self.assertEqual([
('alpha', 'Alpha'),
('beta', 'Beta'),
('gamma', 'Gamma'),
],
items)
def test_attribute_items_ns(self):
XML = self.etree.XML
root = XML(b'<foo bar="Bar" xmlns:ns="http://ns.codespeak.net/test" ns:baz="Baz" />')
items = list(root.attrib.items())
items.sort()
self.assertEqual(
[('bar', 'Bar'), ('{http://ns.codespeak.net/test}baz', 'Baz')],
items)
def test_attribute_str(self):
    """str(element.attrib) formats like a plain dict.

    Attribute ordering in the string is implementation dependent, so
    both orderings of the two attributes are accepted.  Using
    ``assertIn`` against the two candidates replaces the previous
    try/except-AssertionError fallback — same accepted outcomes, but
    no exception-driven control flow and a clearer failure message.
    """
    XML = self.etree.XML
    expected = "{'{http://ns.codespeak.net/test}baz': 'Baz', 'bar': 'Bar'}"
    alternative = "{'bar': 'Bar', '{http://ns.codespeak.net/test}baz': 'Baz'}"
    root = XML(b'<foo bar="Bar" xmlns:ns="http://ns.codespeak.net/test" ns:baz="Baz" />')
    self.assertIn(str(root.attrib), (expected, alternative))
def test_attribute_contains(self):
XML = self.etree.XML
root = XML(b'<foo bar="Bar" xmlns:ns="http://ns.codespeak.net/test" ns:baz="Baz" />')
self.assertEqual(
True, 'bar' in root.attrib)
self.assertEqual(
False, 'baz' in root.attrib)
self.assertEqual(
False, 'hah' in root.attrib)
self.assertEqual(
True,
'{http://ns.codespeak.net/test}baz' in root.attrib)
def test_attribute_set(self):
Element = self.etree.Element
root = Element("root")
root.set("attr", "TEST")
self.assertEqual("TEST", root.get("attr"))
def test_attrib_as_attrib(self):
Element = self.etree.Element
root = Element("root")
root.set("attr", "TEST")
self.assertEqual("TEST", root.attrib["attr"])
root2 = Element("root2", root.attrib)
self.assertEqual("TEST", root2.attrib["attr"])
def test_attribute_iterator(self):
XML = self.etree.XML
root = XML(b'<doc alpha="Alpha" beta="Beta" gamma="Gamma" />')
result = []
for key in root.attrib:
result.append(key)
result.sort()
self.assertEqual(['alpha', 'beta', 'gamma'], result)
def test_attribute_manipulation(self):
Element = self.etree.Element
a = Element('a')
a.attrib['foo'] = 'Foo'
a.attrib['bar'] = 'Bar'
self.assertEqual('Foo', a.attrib['foo'])
del a.attrib['foo']
self.assertRaises(KeyError, operator.getitem, a.attrib, 'foo')
def test_del_attribute_ns(self):
Element = self.etree.Element
a = Element('a')
a.attrib['{http://a/}foo'] = 'Foo'
a.attrib['{http://a/}bar'] = 'Bar'
self.assertEqual(None, a.get('foo'))
self.assertEqual('Foo', a.get('{http://a/}foo'))
self.assertEqual('Foo', a.attrib['{http://a/}foo'])
self.assertRaises(KeyError, operator.delitem, a.attrib, 'foo')
self.assertEqual('Foo', a.attrib['{http://a/}foo'])
del a.attrib['{http://a/}foo']
self.assertRaises(KeyError, operator.getitem, a.attrib, 'foo')
def test_del_attribute_ns_parsed(self):
XML = self.etree.XML
a = XML(b'<a xmlns:nsa="http://a/" nsa:foo="FooNS" foo="Foo" />')
self.assertEqual('Foo', a.attrib['foo'])
self.assertEqual('FooNS', a.attrib['{http://a/}foo'])
del a.attrib['foo']
self.assertEqual('FooNS', a.attrib['{http://a/}foo'])
self.assertRaises(KeyError, operator.getitem, a.attrib, 'foo')
self.assertRaises(KeyError, operator.delitem, a.attrib, 'foo')
del a.attrib['{http://a/}foo']
self.assertRaises(KeyError, operator.getitem, a.attrib, '{http://a/}foo')
self.assertRaises(KeyError, operator.getitem, a.attrib, 'foo')
a = XML(b'<a xmlns:nsa="http://a/" foo="Foo" nsa:foo="FooNS" />')
self.assertEqual('Foo', a.attrib['foo'])
self.assertEqual('FooNS', a.attrib['{http://a/}foo'])
del a.attrib['foo']
self.assertEqual('FooNS', a.attrib['{http://a/}foo'])
self.assertRaises(KeyError, operator.getitem, a.attrib, 'foo')
del a.attrib['{http://a/}foo']
self.assertRaises(KeyError, operator.getitem, a.attrib, '{http://a/}foo')
self.assertRaises(KeyError, operator.getitem, a.attrib, 'foo')
def test_XML(self):
    """XML() parses a bytes document and returns the root element."""
    parsed = self.etree.XML(b'<doc>This is a text.</doc>')
    self.assertEqual(0, len(parsed))
    self.assertEqual('This is a text.', parsed.text)
def test_XMLID(self):
XMLID = self.etree.XMLID
XML = self.etree.XML
xml_text = b'''
<document>
<h1 id="chapter1">...</h1>
<p id="note1" class="note">...</p>
<p>Regular paragraph.</p>
<p xml:id="xmlid">XML:ID paragraph.</p>
<p id="warn1" class="warning">...</p>
</document>
'''
root, dic = XMLID(xml_text)
root2 = XML(xml_text)
self.assertEqual(self._writeElement(root),
self._writeElement(root2))
expected = {
"chapter1" : root[0],
"note1" : root[1],
"warn1" : root[4]
}
self.assertEqual(dic, expected)
def test_fromstring(self):
    """fromstring() accepts a plain str document."""
    parsed = self.etree.fromstring('<doc>This is a text.</doc>')
    self.assertEqual(0, len(parsed))
    self.assertEqual('This is a text.', parsed.text)
def test_fromstring_memoryview(self):
fromstring = self.etree.fromstring
root = fromstring(memoryview(b'<doc>This is a text.</doc>'))
self.assertEqual(0, len(root))
self.assertEqual('This is a text.', root.text)
def test_fromstring_char_array(self):
fromstring = self.etree.fromstring
import array
root = fromstring(array.array('B', b'<doc>This is a text.</doc>'))
self.assertEqual(0, len(root))
self.assertEqual('This is a text.', root.text)
def test_fromstring_uchar_array(self):
fromstring = self.etree.fromstring
import array
root = fromstring(array.array('b', b'<doc>This is a text.</doc>'))
self.assertEqual(0, len(root))
self.assertEqual('This is a text.', root.text)
required_versions_ET['test_fromstringlist'] = (1,3)
def test_fromstringlist(self):
fromstringlist = self.etree.fromstringlist
root = fromstringlist(["<do", "c>T", "hi", "s is",
" a text.<", "/doc", ">"])
self.assertEqual(0, len(root))
self.assertEqual('This is a text.', root.text)
required_versions_ET['test_fromstringlist_characters'] = (1,3)
def test_fromstringlist_characters(self):
fromstringlist = self.etree.fromstringlist
root = fromstringlist(list('<doc>This is a text.</doc>'))
self.assertEqual(0, len(root))
self.assertEqual('This is a text.', root.text)
required_versions_ET['test_fromstringlist_single'] = (1,3)
def test_fromstringlist_single(self):
fromstringlist = self.etree.fromstringlist
root = fromstringlist(['<doc>This is a text.</doc>'])
self.assertEqual(0, len(root))
self.assertEqual('This is a text.', root.text)
def test_iselement(self):
iselement = self.etree.iselement
Element = self.etree.Element
ElementTree = self.etree.ElementTree
XML = self.etree.XML
Comment = self.etree.Comment
ProcessingInstruction = self.etree.ProcessingInstruction
el = Element('hoi')
self.assertTrue(iselement(el))
el2 = XML(b'<foo/>')
self.assertTrue(iselement(el2))
tree = ElementTree(element=Element('dag'))
self.assertTrue(not iselement(tree))
self.assertTrue(iselement(tree.getroot()))
c = Comment('test')
self.assertTrue(iselement(c))
p = ProcessingInstruction("test", "some text")
self.assertTrue(iselement(p))
def test_iteration(self):
    """Iterating an element yields child elements in order, skipping text."""
    root = self.etree.XML(b'<doc><one/><two>Two</two>Hm<three/></doc>')
    seen = [child.tag for child in root]
    self.assertEqual(['one', 'two', 'three'], seen)
def test_iteration_empty(self):
XML = self.etree.XML
root = XML(b'<doc></doc>')
result = []
for el in root:
result.append(el.tag)
self.assertEqual([], result)
def test_iteration_text_only(self):
XML = self.etree.XML
root = XML(b'<doc>Text</doc>')
result = []
for el in root:
result.append(el.tag)
self.assertEqual([], result)
def test_iteration_set_tail_empty(self):
# this would cause a crash in the past
fromstring = self.etree.fromstring
root = fromstring('<html><p></p>x</html>')
for elem in root:
elem.tail = ''
def test_iteration_clear_tail(self):
# this would cause a crash in the past
fromstring = self.etree.fromstring
root = fromstring('<html><p></p>x</html>')
for elem in root:
elem.tail = None
def test_iteration_reversed(self):
XML = self.etree.XML
root = XML(b'<doc><one/><two>Two</two>Hm<three/></doc>')
result = []
for el in reversed(root):
result.append(el.tag)
self.assertEqual(['three', 'two', 'one'], result)
def test_iteration_subelement(self):
    """A child appended while iterating is picked up by the same loop."""
    XML = self.etree.XML
    root = XML(b'<doc><one/><two>Two</two>Hm<three/></doc>')
    result = []
    add = True
    for el in root:
        result.append(el.tag)
        if add:
            # append 'four' during the first iteration only
            self.etree.SubElement(root, 'four')
            add = False
    # the element added mid-loop is still reached by the iterator
    self.assertEqual(['one', 'two', 'three', 'four'], result)
def test_iteration_del_child(self):
    """Deleting the last child during iteration shortens the loop."""
    XML = self.etree.XML
    root = XML(b'<doc><one/><two>Two</two>Hm<three/></doc>')
    result = []
    for el in root:
        result.append(el.tag)
        del root[-1]  # drop the current tail element on every iteration
    # 'three' (and then 'two') were removed before the iterator got there
    self.assertEqual(['one', 'two'], result)
def test_iteration_double(self):
    """Nested iterators over the same element run independently."""
    XML = self.etree.XML
    root = XML(b'<doc><one/><two/></doc>')
    result = []
    for el0 in root:
        result.append(el0.tag)
        # the inner loop restarts from the first child each time
        for el1 in root:
            result.append(el1.tag)
    self.assertEqual(['one','one', 'two', 'two', 'one', 'two'], result)
required_versions_ET['test_itertext'] = (1,3)
def test_itertext(self):
# ET 1.3+
XML = self.etree.XML
root = XML(b"<root>RTEXT<a></a>ATAIL<b/><c>CTEXT</c>CTAIL</root>")
text = list(root.itertext())
self.assertEqual(["RTEXT", "ATAIL", "CTEXT", "CTAIL"],
text)
required_versions_ET['test_itertext_child'] = (1,3)
def test_itertext_child(self):
# ET 1.3+
XML = self.etree.XML
root = XML(b"<root>RTEXT<a></a>ATAIL<b/><c>CTEXT</c>CTAIL</root>")
text = list(root[2].itertext())
self.assertEqual(["CTEXT"],
text)
def test_findall(self):
    """findall() supports direct-child tags and the .// descendant axis."""
    root = self.etree.XML(b'<a><b><c/></b><b/><c><b/></c></a>')
    self.assertEqual(len(root.findall("c")), 1)
    self.assertEqual(len(root.findall(".//c")), 2)
    all_b = root.findall(".//b")
    self.assertEqual(len(all_b), 3)
    self.assertEqual(len(all_b[0]), 1)
    self.assertEqual(len(all_b[1]), 0)
    self.assertEqual(len(all_b[2]), 0)
def test_findall_ns(self):
XML = self.etree.XML
root = XML(b'<a xmlns:x="X" xmlns:y="Y"><x:b><c/></x:b><b/><c><x:b/><b/></c><b/></a>')
self.assertEqual(len(list(root.findall(".//{X}b"))), 2)
self.assertEqual(len(list(root.findall(".//b"))), 3)
self.assertEqual(len(list(root.findall("b"))), 2)
@et_needs_pyversion(3, 8, 0, 'alpha', 4)
def test_findall_wildcard(self):
def summarize_list(l):
return [el.tag for el in l]
root = self.etree.XML('''
<a xmlns:x="X" xmlns:y="Y">
<x:b><c/></x:b>
<b/>
<c><x:b/><b/></c><y:b/>
</a>''')
root.append(self.etree.Comment('test'))
self.assertEqual(summarize_list(root.findall("{*}b")),
['{X}b', 'b', '{Y}b'])
self.assertEqual(summarize_list(root.findall("{*}c")),
['c'])
self.assertEqual(summarize_list(root.findall("{X}*")),
['{X}b'])
self.assertEqual(summarize_list(root.findall("{Y}*")),
['{Y}b'])
self.assertEqual(summarize_list(root.findall("{}*")),
['b', 'c'])
self.assertEqual(summarize_list(root.findall("{}b")), # only for consistency
['b'])
self.assertEqual(summarize_list(root.findall("{}b")),
summarize_list(root.findall("b")))
self.assertEqual(summarize_list(root.findall("{*}*")),
['{X}b', 'b', 'c', '{Y}b'])
self.assertEqual(summarize_list(root.findall("{*}*")
+ ([] if self.etree is etree else [root[-1]])),
summarize_list(root.findall("*")))
self.assertEqual(summarize_list(root.findall(".//{*}b")),
['{X}b', 'b', '{X}b', 'b', '{Y}b'])
self.assertEqual(summarize_list(root.findall(".//{*}c")),
['c', 'c'])
self.assertEqual(summarize_list(root.findall(".//{X}*")),
['{X}b', '{X}b'])
self.assertEqual(summarize_list(root.findall(".//{Y}*")),
['{Y}b'])
self.assertEqual(summarize_list(root.findall(".//{}*")),
['c', 'b', 'c', 'b'])
self.assertEqual(summarize_list(root.findall(".//{}b")),
['b', 'b'])
def test_element_with_attributes_keywords(self):
Element = self.etree.Element
el = Element('tag', foo='Foo', bar='Bar')
self.assertEqual('Foo', el.attrib['foo'])
self.assertEqual('Bar', el.attrib['bar'])
def test_element_with_attributes(self):
Element = self.etree.Element
el = Element('tag', {'foo': 'Foo', 'bar': 'Bar'})
self.assertEqual('Foo', el.attrib['foo'])
self.assertEqual('Bar', el.attrib['bar'])
def test_element_with_attributes_extra(self):
Element = self.etree.Element
el = Element('tag', {'foo': 'Foo', 'bar': 'Bar'}, baz='Baz')
self.assertEqual('Foo', el.attrib['foo'])
self.assertEqual('Bar', el.attrib['bar'])
self.assertEqual('Baz', el.attrib['baz'])
def test_element_with_attributes_extra_duplicate(self):
Element = self.etree.Element
el = Element('tag', {'foo': 'Foo', 'bar': 'Bar'}, bar='Baz')
self.assertEqual('Foo', el.attrib['foo'])
self.assertEqual('Baz', el.attrib['bar'])
def test_element_with_attributes_ns(self):
Element = self.etree.Element
el = Element('tag', {'{ns1}foo':'Foo', '{ns2}bar':'Bar'})
self.assertEqual('Foo', el.attrib['{ns1}foo'])
self.assertEqual('Bar', el.attrib['{ns2}bar'])
def test_subelement_with_attributes(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('tag')
SubElement(el, 'foo', {'foo':'Foo'}, baz="Baz")
self.assertEqual("Baz", el[0].attrib['baz'])
self.assertEqual('Foo', el[0].attrib['foo'])
def test_subelement_with_attributes_ns(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('tag')
SubElement(el, 'foo', {'{ns1}foo':'Foo', '{ns2}bar':'Bar'})
self.assertEqual('Foo', el[0].attrib['{ns1}foo'])
self.assertEqual('Bar', el[0].attrib['{ns2}bar'])
def test_write(self):
ElementTree = self.etree.ElementTree
XML = self.etree.XML
for i in range(10):
f = BytesIO()
root = XML(b'<doc%d>This is a test.</doc%d>' % (i, i))
tree = ElementTree(element=root)
tree.write(f)
data = f.getvalue()
self.assertEqual(
b'<doc%d>This is a test.</doc%d>' % (i, i),
canonicalize(data))
required_versions_ET['test_write_method_html'] = (1,3)
def test_write_method_html(self):
ElementTree = self.etree.ElementTree
Element = self.etree.Element
SubElement = self.etree.SubElement
html = Element('html')
body = SubElement(html, 'body')
p = SubElement(body, 'p')
p.text = "html"
SubElement(p, 'br').tail = "test"
tree = ElementTree(element=html)
f = BytesIO()
tree.write(f, method="html")
data = f.getvalue().replace(b'\n',b'')
self.assertEqual(b'<html><body><p>html<br>test</p></body></html>',
data)
required_versions_ET['test_write_method_text'] = (1,3)
def test_write_method_text(self):
ElementTree = self.etree.ElementTree
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.text = "A"
a.tail = "tail"
b = SubElement(a, 'b')
b.text = "B"
b.tail = "TAIL"
c = SubElement(a, 'c')
c.text = "C"
tree = ElementTree(element=a)
f = BytesIO()
tree.write(f, method="text")
data = f.getvalue()
self.assertEqual(b'ABTAILCtail',
data)
def test_write_fail(self):
ElementTree = self.etree.ElementTree
XML = self.etree.XML
tree = ElementTree( XML(b'<doc>This is a test.</doc>') )
self.assertRaises(IOError, tree.write,
"definitely////\\-\\nonexisting\\-\\////FILE")
# this could trigger a crash, apparently because the document
# reference was prematurely garbage collected
def test_crash(self):
Element = self.etree.Element
element = Element('tag')
for i in range(10):
element.attrib['key'] = 'value'
value = element.attrib['key']
self.assertEqual(value, 'value')
# from doctest; for some reason this caused crashes too
def test_write_ElementTreeDoctest(self):
Element = self.etree.Element
ElementTree = self.etree.ElementTree
f = BytesIO()
for i in range(10):
element = Element('tag%s' % i)
self._check_element(element)
tree = ElementTree(element)
tree.write(f)
self._check_element_tree(tree)
def test_subelement_reference(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('foo')
el2 = SubElement(el, 'bar')
el3 = SubElement(el2, 'baz')
al = Element('foo2')
al2 = SubElement(al, 'bar2')
al3 = SubElement(al2, 'baz2')
# now move al2 into el
el.append(al2)
# now change al3 directly
al3.text = 'baz2-modified'
# it should have changed through this route too
self.assertEqual(
'baz2-modified',
el[1][0].text)
def test_set_text(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
a.text = 'hoi'
self.assertEqual(
'hoi',
a.text)
self.assertEqual(
'b',
a[0].tag)
def test_set_text2(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.text = 'hoi'
b = SubElement(a ,'b')
self.assertEqual(
'hoi',
a.text)
self.assertEqual(
'b',
a[0].tag)
def test_set_text_none(self):
Element = self.etree.Element
a = Element('a')
a.text = 'foo'
a.text = None
self.assertEqual(
None,
a.text)
self.assertXML(b'<a></a>', a)
def test_set_text_empty(self):
Element = self.etree.Element
a = Element('a')
self.assertEqual(None, a.text)
a.text = ''
self.assertEqual('', a.text)
self.assertXML(b'<a></a>', a)
def test_tail1(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.tail = 'dag'
self.assertEqual('dag',
a.tail)
b = SubElement(a, 'b')
b.tail = 'hoi'
self.assertEqual('hoi',
b.tail)
self.assertEqual('dag',
a.tail)
def test_tail_append(self):
Element = self.etree.Element
a = Element('a')
b = Element('b')
b.tail = 'b_tail'
a.append(b)
self.assertEqual('b_tail',
b.tail)
def test_tail_set_twice(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
b.tail = 'foo'
b.tail = 'bar'
self.assertEqual('bar',
b.tail)
self.assertXML(b'<a><b></b>bar</a>', a)
def test_tail_set_none(self):
Element = self.etree.Element
a = Element('a')
a.tail = 'foo'
a.tail = None
self.assertEqual(
None,
a.tail)
self.assertXML(b'<a></a>', a)
required_versions_ET['test_extend'] = (1,3)
def test_extend(self):
root = self.etree.Element('foo')
for i in range(3):
element = self.etree.SubElement(root, 'a%s' % i)
element.text = "text%d" % i
element.tail = "tail%d" % i
elements = []
for i in range(3):
new_element = self.etree.Element("test%s" % i)
new_element.text = "TEXT%s" % i
new_element.tail = "TAIL%s" % i
elements.append(new_element)
root.extend(elements)
self.assertEqual(
["a0", "a1", "a2", "test0", "test1", "test2"],
[ el.tag for el in root ])
self.assertEqual(
["text0", "text1", "text2", "TEXT0", "TEXT1", "TEXT2"],
[ el.text for el in root ])
self.assertEqual(
["tail0", "tail1", "tail2", "TAIL0", "TAIL1", "TAIL2"],
[ el.tail for el in root ])
def test_comment(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
Comment = self.etree.Comment
a = Element('a')
a.append(Comment('foo'))
self.assertEqual(a[0].tag, Comment)
self.assertEqual(a[0].text, 'foo')
# ElementTree < 1.3 adds whitespace around comments
required_versions_ET['test_comment_text'] = (1,3)
def test_comment_text(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
Comment = self.etree.Comment
tostring = self.etree.tostring
a = Element('a')
a.append(Comment('foo'))
self.assertEqual(a[0].text, 'foo')
self.assertEqual(
b'<a><!--foo--></a>',
tostring(a))
a[0].text = "TEST"
self.assertEqual(a[0].text, 'TEST')
self.assertEqual(
b'<a><!--TEST--></a>',
tostring(a))
# ElementTree < 1.3 adds whitespace around comments
required_versions_ET['test_comment_whitespace'] = (1,3)
def test_comment_whitespace(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
Comment = self.etree.Comment
tostring = self.etree.tostring
a = Element('a')
a.append(Comment(' foo '))
self.assertEqual(a[0].text, ' foo ')
self.assertEqual(
b'<a><!-- foo --></a>',
tostring(a))
def test_comment_nonsense(self):
Comment = self.etree.Comment
c = Comment('foo')
self.assertEqual({}, c.attrib)
self.assertEqual([], list(c.keys()))
self.assertEqual([], list(c.items()))
self.assertEqual(None, c.get('hoi'))
self.assertEqual(0, len(c))
# should not iterate
for i in c:
pass
def test_pi(self):
# lxml.etree separates target and text
Element = self.etree.Element
SubElement = self.etree.SubElement
ProcessingInstruction = self.etree.ProcessingInstruction
a = Element('a')
a.append(ProcessingInstruction('foo', 'some more text'))
self.assertEqual(a[0].tag, ProcessingInstruction)
self.assertXML(b"<a><?foo some more text?></a>",
a)
def test_processinginstruction(self):
# lxml.etree separates target and text
Element = self.etree.Element
SubElement = self.etree.SubElement
ProcessingInstruction = self.etree.PI
a = Element('a')
a.append(ProcessingInstruction('foo', 'some more text'))
self.assertEqual(a[0].tag, ProcessingInstruction)
self.assertXML(b"<a><?foo some more text?></a>",
a)
def test_pi_nonsense(self):
ProcessingInstruction = self.etree.ProcessingInstruction
pi = ProcessingInstruction('foo')
self.assertEqual({}, pi.attrib)
self.assertEqual([], list(pi.keys()))
self.assertEqual([], list(pi.items()))
self.assertEqual(None, pi.get('hoi'))
self.assertEqual(0, len(pi))
# should not iterate
for i in pi:
pass
def test_setitem(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = Element('c')
a[0] = c
self.assertEqual(
c,
a[0])
self.assertXML(b'<a><c></c></a>',
a)
self.assertXML(b'<b></b>',
b)
def test_setitem2(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
for i in range(5):
b = SubElement(a, 'b%s' % i)
c = SubElement(b, 'c')
for i in range(5):
d = Element('d')
e = SubElement(d, 'e')
a[i] = d
self.assertXML(
b'<a><d><e></e></d><d><e></e></d><d><e></e></d><d><e></e></d><d><e></e></d></a>',
a)
self.assertXML(b'<c></c>',
c)
def test_setitem_replace(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
SubElement(a, 'b')
d = Element('d')
a[0] = d
self.assertXML(b'<a><d></d></a>', a)
def test_setitem_indexerror(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
self.assertRaises(IndexError, operator.setitem, a, 1, Element('c'))
def test_setitem_tail(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
b.tail = 'B2'
c = Element('c')
c.tail = 'C2'
a[0] = c
self.assertXML(
b'<a><c></c>C2</a>',
a)
def test_tag_write(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
a.tag = 'c'
self.assertEqual(
'c',
a.tag)
self.assertXML(
b'<c><b></b></c>',
a)
def test_tag_reset_ns(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
tostring = self.etree.tostring
a = Element('{a}a')
b1 = SubElement(a, '{a}b')
b2 = SubElement(a, '{b}b')
self.assertEqual('{a}b', b1.tag)
b1.tag = 'c'
# can't use C14N here!
self.assertEqual('c', b1.tag)
self.assertEqual(b'<c', tostring(b1)[:2])
self.assertTrue(b'<c' in tostring(a))
def test_tag_reset_root_ns(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
tostring = self.etree.tostring
a = Element('{a}a')
b1 = SubElement(a, '{a}b')
b2 = SubElement(a, '{b}b')
a.tag = 'c'
self.assertEqual(
'c',
a.tag)
# can't use C14N here!
self.assertEqual('c', a.tag)
self.assertEqual(b'<c', tostring(a)[:2])
def test_tag_str_subclass(self):
Element = self.etree.Element
class strTest(str):
pass
a = Element("a")
a.tag = strTest("TAG")
self.assertXML(b'<TAG></TAG>',
a)
def test_delitem(self):
    """del element[i] detaches the child; the removed element stays usable."""
    Element = self.etree.Element
    SubElement = self.etree.SubElement
    a = Element('a')
    b = SubElement(a, 'b')
    c = SubElement(a, 'c')
    d = SubElement(a, 'd')
    del a[1]  # removes 'c'
    self.assertXML(
        b'<a><b></b><d></d></a>',
        a)
    del a[0]  # removes 'b'
    self.assertXML(
        b'<a><d></d></a>',
        a)
    del a[0]  # removes 'd'
    self.assertXML(
        b'<a></a>',
        a)
    # move deleted element into other tree afterwards
    other = Element('other')
    other.append(c)
    self.assertXML(
        b'<other><c></c></other>',
        other)
def test_del_insert(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
bs = SubElement(b, 'bs')
c = SubElement(a, 'c')
cs = SubElement(c, 'cs')
el = a[0]
self.assertXML(
b'<a><b><bs></bs></b><c><cs></cs></c></a>',
a)
self.assertXML(b'<b><bs></bs></b>', b)
self.assertXML(b'<c><cs></cs></c>', c)
del a[0]
self.assertXML(
b'<a><c><cs></cs></c></a>',
a)
self.assertXML(b'<b><bs></bs></b>', b)
self.assertXML(b'<c><cs></cs></c>', c)
a.insert(0, el)
self.assertXML(
b'<a><b><bs></bs></b><c><cs></cs></c></a>',
a)
self.assertXML(b'<b><bs></bs></b>', b)
self.assertXML(b'<c><cs></cs></c>', c)
def test_del_setitem(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
bs = SubElement(b, 'bs')
c = SubElement(a, 'c')
cs = SubElement(c, 'cs')
el = a[0]
del a[0]
a[0] = el
self.assertXML(
b'<a><b><bs></bs></b></a>',
a)
self.assertXML(b'<b><bs></bs></b>', b)
self.assertXML(b'<c><cs></cs></c>', c)
def test_del_setslice(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
bs = SubElement(b, 'bs')
c = SubElement(a, 'c')
cs = SubElement(c, 'cs')
el = a[0]
del a[0]
a[0:0] = [el]
self.assertXML(
b'<a><b><bs></bs></b><c><cs></cs></c></a>',
a)
self.assertXML(b'<b><bs></bs></b>', b)
self.assertXML(b'<c><cs></cs></c>', c)
def test_replace_slice_tail(self):
XML = self.etree.XML
a = XML(b'<a><b></b>B2<c></c>C2</a>')
b, c = a
a[:] = []
self.assertEqual("B2", b.tail)
self.assertEqual("C2", c.tail)
def test_merge_namespaced_subtree_as_slice(self):
XML = self.etree.XML
root = XML(
b'<foo><bar xmlns:baz="http://huhu"><puh><baz:bump1 /><baz:bump2 /></puh></bar></foo>')
root[:] = root.findall('.//puh') # delete bar from hierarchy
# previously, this lost a namespace declaration on bump2
result = self.etree.tostring(root)
foo = self.etree.fromstring(result)
self.assertEqual('puh', foo[0].tag)
self.assertEqual('{http://huhu}bump1', foo[0][0].tag)
self.assertEqual('{http://huhu}bump2', foo[0][1].tag)
def test_delitem_tail_dealloc(self):
ElementTree = self.etree.ElementTree
f = BytesIO(b'<a><b></b>B2<c></c>C2</a>')
doc = ElementTree(file=f)
a = doc.getroot()
del a[0]
self.assertXML(
b'<a><c></c>C2</a>',
a)
def test_delitem_tail(self):
ElementTree = self.etree.ElementTree
f = BytesIO(b'<a><b></b>B2<c></c>C2</a>')
doc = ElementTree(file=f)
a = doc.getroot()
b, c = a
del a[0]
self.assertXML(
b'<a><c></c>C2</a>',
a)
self.assertEqual("B2", b.tail)
self.assertEqual("C2", c.tail)
def test_clear(self):
Element = self.etree.Element
a = Element('a')
a.text = 'foo'
a.tail = 'bar'
a.set('hoi', 'dag')
a.clear()
self.assertEqual(None, a.text)
self.assertEqual(None, a.tail)
self.assertEqual(None, a.get('hoi'))
self.assertEqual('a', a.tag)
    def test_clear_sub(self):
        """Element.clear() detaches children; a held child keeps its own subtree."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        a.text = 'foo'
        a.tail = 'bar'
        a.set('hoi', 'dag')
        b = SubElement(a, 'b')
        c = SubElement(b, 'c')
        a.clear()
        self.assertEqual(None, a.text)
        self.assertEqual(None, a.tail)
        self.assertEqual(None, a.get('hoi'))
        self.assertEqual('a', a.tag)
        self.assertEqual(0, len(a))
        self.assertXML(b'<a></a>',
                       a)
        # detached child subtree (b -> c) is still serializable on its own
        self.assertXML(b'<b><c></c></b>',
                       b)
    def test_clear_tail(self):
        """Element.clear() on a parsed root removes children and their tail text."""
        ElementTree = self.etree.ElementTree
        f = BytesIO(b'<a><b></b>B2<c></c>C2</a>')
        doc = ElementTree(file=f)
        a = doc.getroot()
        a.clear()
        self.assertXML(
            b'<a></a>',
            a)
    def test_insert(self):
        """Element.insert() places new children at the head and in the middle."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = Element('d')
        a.insert(0, d)
        self.assertEqual(
            d,
            a[0])
        self.assertXML(
            b'<a><d></d><b></b><c></c></a>',
            a)
        e = Element('e')
        a.insert(2, e)
        self.assertEqual(
            e,
            a[2])
        self.assertXML(
            b'<a><d></d><b></b><e></e><c></c></a>',
            a)
    def test_insert_name_interning(self):
        """insert() works with tag names never seen before (name-dict interning).

        See GH#268 / LP#1773749. Uses uuid-based tag names so every name is
        guaranteed to be new in the tag name dictionary.
        """
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        # Use unique names to make sure they are new in the tag name dict.
        import uuid
        names = {k: f'tag-{uuid.uuid4()}' for k in 'abcde'}
        a = Element(names['a'])
        b = SubElement(a, names['b'])
        c = SubElement(a, names['c'])
        d = Element(names['d'])
        a.insert(0, d)
        self.assertEqual(
            d,
            a[0])
        self.assertXML(
            ('<%(a)s><%(d)s></%(d)s><%(b)s></%(b)s><%(c)s></%(c)s></%(a)s>' % names).encode('utf-8'),
            a)
        e = Element(names['e'])
        a.insert(2, e)
        self.assertEqual(
            e,
            a[2])
        self.assertXML(
            ('<%(a)s><%(d)s></%(d)s><%(b)s></%(b)s><%(e)s></%(e)s><%(c)s></%(c)s></%(a)s>' % names).encode('utf-8'),
            a)
    def test_insert_beyond_index(self):
        """insert() past the end appends (list semantics), raising no error."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = Element('c')
        a.insert(2, c)
        self.assertEqual(
            c,
            a[1])
        self.assertXML(
            b'<a><b></b><c></c></a>',
            a)
    def test_insert_negative(self):
        """insert(-1, x) inserts before the last child (list semantics)."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = Element('d')
        a.insert(-1, d)
        self.assertEqual(
            d,
            a[-2])
        self.assertXML(
            b'<a><b></b><d></d><c></c></a>',
            a)
    def test_insert_tail(self):
        """insert() carries the inserted element's tail text along."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = Element('c')
        c.tail = 'C2'
        a.insert(0, c)
        self.assertXML(
            b'<a><c></c>C2<b></b></a>',
            a)
    def test_remove(self):
        """Element.remove() deletes a child; remaining children shift down."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        a.remove(b)
        self.assertEqual(
            c,
            a[0])
        self.assertXML(
            b'<a><c></c></a>',
            a)
    def test_remove_ns(self):
        """remove() on a namespaced child keeps the namespace usable on both trees."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('{http://test}a')
        b = SubElement(a, '{http://test}b')
        c = SubElement(a, '{http://test}c')
        a.remove(b)
        self.assertXML(
            b'<ns0:a xmlns:ns0="http://test"><ns0:c></ns0:c></ns0:a>',
            a)
        # the removed element serializes standalone with its own ns declaration
        self.assertXML(
            b'<ns0:b xmlns:ns0="http://test"></ns0:b>',
            b)
    def test_remove_nonexisting(self):
        """remove() of an element that is not a child raises ValueError."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = Element('d')
        self.assertRaises(
            ValueError, a.remove, d)
    def test_remove_tail(self):
        """remove() drops the tail from the parent but keeps it on the element."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        b.tail = 'b2'
        a.remove(b)
        self.assertXML(
            b'<a></a>',
            a)
        self.assertEqual('b2', b.tail)
    def test_remove_while_iterating(self):
        """Removing children during iteration must not crash.

        The exact outcome is unspecified; we only require that at least one
        child was removed and no exception occurred.
        """
        # There is no guarantee that this "works", but it should
        # remove at least one child and not crash.
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        SubElement(a, 'b')
        SubElement(a, 'c')
        SubElement(a, 'd')
        for el in a:
            a.remove(el)
        self.assertLess(len(a), 3)
    def test_makeelement(self):
        """makeelement() creates a detached element with the given attributes."""
        Element = self.etree.Element
        a = Element('a')
        b = a.makeelement('c', {'hoi':'dag'})
        self.assertXML(
            b'<c hoi="dag"></c>',
            b)
    required_versions_ET['test_iter'] = (1,3)
    def test_iter(self):
        """iter() walks the subtree in document order, starting at self."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = SubElement(b, 'd')
        e = SubElement(c, 'e')
        self.assertEqual(
            [a, b, d, c, e],
            list(a.iter()))
        # a leaf iterates over itself only
        self.assertEqual(
            [d],
            list(d.iter()))
    def test_iter_remove_tail(self):
        """Tails can be cleared during iter() without corrupting the walk.

        Uses long tail strings so that freed text memory would be noticed.
        """
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        a.text = 'a'
        a.tail = 'a1' * 100
        b = SubElement(a, 'b')
        b.text = 'b'
        b.tail = 'b1' * 100
        c = SubElement(a, 'c')
        c.text = 'c'
        c.tail = 'c1' * 100
        d = SubElement(b, 'd')
        d.text = 'd'
        d.tail = 'd1' * 100
        e = SubElement(c, 'e')
        e.text = 'e'
        e.tail = 'e1' * 100
        for el in a.iter():
            el.tail = None
        el = None
        self.assertEqual(
            [None] * 5,
            [el.tail for el in a.iter()])
    def test_getslice(self):
        """Slicing an element returns lists of children with list semantics."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = SubElement(a, 'd')
        self.assertEqual(
            [b, c],
            a[0:2])
        self.assertEqual(
            [b, c, d],
            a[:])
        # out-of-range bounds are clamped, not errors
        self.assertEqual(
            [b, c, d],
            a[:10])
        self.assertEqual(
            [b],
            a[0:1])
        self.assertEqual(
            [],
            a[10:12])
    def test_getslice_negative(self):
        """Negative slice indices count from the end, like lists."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = SubElement(a, 'd')
        self.assertEqual(
            [d],
            a[-1:])
        self.assertEqual(
            [c, d],
            a[-2:])
        self.assertEqual(
            [c],
            a[-2:-1])
        self.assertEqual(
            [b, c],
            a[-3:-1])
        self.assertEqual(
            [b, c],
            a[-3:2])
    def test_getslice_step(self):
        """Extended slices (step != 1, including negative) work on children."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = SubElement(a, 'd')
        e = SubElement(a, 'e')
        self.assertEqual(
            [e,d,c,b],
            a[::-1])
        self.assertEqual(
            [b,d],
            a[::2])
        self.assertEqual(
            [e,c],
            a[::-2])
        self.assertEqual(
            [d,c],
            a[-2:0:-1])
        self.assertEqual(
            [e],
            a[:1:-2])
    def test_getslice_text(self):
        """Slicing skips text/tail nodes and returns element children only."""
        ElementTree = self.etree.ElementTree
        f = BytesIO(b'<a><b>B</b>B1<c>C</c>C1</a>')
        doc = ElementTree(file=f)
        a = doc.getroot()
        b = a[0]
        c = a[1]
        self.assertEqual(
            [b, c],
            a[:])
        self.assertEqual(
            [b],
            a[0:1])
        self.assertEqual(
            [c],
            a[1:])
    def test_comment_getitem_getslice(self):
        """Comment nodes take part in indexing, slicing and item assignment."""
        Element = self.etree.Element
        Comment = self.etree.Comment
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        foo = Comment('foo')
        a.append(foo)
        c = SubElement(a, 'c')
        self.assertEqual(
            [b, foo, c],
            a[:])
        self.assertEqual(
            foo,
            a[1])
        # a comment can be replaced by an element via item assignment
        a[1] = new = Element('new')
        self.assertEqual(
            new,
            a[1])
        self.assertXML(
            b'<a><b></b><new></new><c></c></a>',
            a)
    def test_delslice(self):
        """del a[1:3] removes the middle children."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = SubElement(a, 'd')
        e = SubElement(a, 'e')
        del a[1:3]
        self.assertEqual(
            [b, e],
            list(a))
    def test_delslice_negative1(self):
        """del with a negative stop index removes the expected children."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = SubElement(a, 'd')
        e = SubElement(a, 'e')
        del a[1:-1]
        self.assertEqual(
            [b, e],
            list(a))
    def test_delslice_negative2(self):
        """del with negative start and stop indices removes the expected children."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = SubElement(a, 'd')
        e = SubElement(a, 'e')
        del a[-3:-1]
        self.assertEqual(
            [b, e],
            list(a))
    def test_delslice_step(self):
        """del with a step removes every matching child."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = SubElement(a, 'd')
        e = SubElement(a, 'e')
        del a[1::2]
        self.assertEqual(
            [b, d],
            list(a))
    def test_delslice_step_negative(self):
        """del a[::-1] removes all children."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = SubElement(a, 'd')
        e = SubElement(a, 'e')
        del a[::-1]
        self.assertEqual(
            [],
            list(a))
    def test_delslice_step_negative2(self):
        """del a[::-2] removes every other child, counted from the end."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = SubElement(a, 'd')
        e = SubElement(a, 'e')
        del a[::-2]
        self.assertEqual(
            [b, d],
            list(a))
    def test_delslice_child_tail_dealloc(self):
        """Slice deletion removes tails too; deleted children hold no references.

        Without external references the removed elements may be deallocated
        right away — serialization of the parent must still be correct.
        """
        ElementTree = self.etree.ElementTree
        f = BytesIO(b'<a><b></b>B2<c></c>C2<d></d>D2<e></e>E2</a>')
        doc = ElementTree(file=f)
        a = doc.getroot()
        del a[1:3]
        self.assertXML(
            b'<a><b></b>B2<e></e>E2</a>',
            a)
    def test_delslice_child_tail(self):
        """Slice deletion keeps the tails on the removed (still referenced) children."""
        ElementTree = self.etree.ElementTree
        f = BytesIO(b'<a><b></b>B2<c></c>C2<d></d>D2<e></e>E2</a>')
        doc = ElementTree(file=f)
        a = doc.getroot()
        b, c, d, e = a  # keep references so deleted children survive
        del a[1:3]
        self.assertXML(
            b'<a><b></b>B2<e></e>E2</a>',
            a)
        self.assertEqual("B2", b.tail)
        self.assertEqual("C2", c.tail)
        self.assertEqual("D2", d.tail)
        self.assertEqual("E2", e.tail)
    def test_delslice_tail(self):
        """del a[:] keeps each removed child's own tail text."""
        XML = self.etree.XML
        a = XML(b'<a><b></b>B2<c></c>C2</a>')
        b, c = a
        del a[:]
        self.assertEqual("B2", b.tail)
        self.assertEqual("C2", c.tail)
    def test_delslice_memory(self):
        """Deleting a subtree whose intermediate node is unreferenced must not crash.

        Regression test: grandchild 'c' is still referenced while its parent
        'b' is not; del a[:] must keep c's memory valid.
        """
        # this could trigger a crash
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(b, 'c')
        del b # no more reference to b
        del a[:]
        self.assertEqual('c', c.tag)
    def test_setslice(self):
        """Slice assignment replaces one child with several."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = SubElement(a, 'd')
        e = Element('e')
        f = Element('f')
        g = Element('g')
        s = [e, f, g]
        a[1:2] = s
        self.assertEqual(
            [b, e, f, g, d],
            list(a))
    def test_setslice_all(self):
        """a[:] = s replaces all existing children."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        e = Element('e')
        f = Element('f')
        g = Element('g')
        s = [e, f, g]
        a[:] = s
        self.assertEqual(
            [e, f, g],
            list(a))
    def test_setslice_all_empty(self):
        """a[:] = s on an element without children appends them all."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        e = Element('e')
        f = Element('f')
        g = Element('g')
        s = [e, f, g]
        a[:] = s
        self.assertEqual(
            [e, f, g],
            list(a))
    def test_setslice_all_replace(self):
        """Replacing all children with themselves (same order) is a no-op."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = SubElement(a, 'd')
        s = [b, c, d]
        a[:] = s
        self.assertEqual(
            [b, c, d],
            list(a))
    def test_setslice_all_replace_reversed(self):
        """Assigning the existing children in reverse order reorders them."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = SubElement(a, 'd')
        s = [d, c, b]
        a[:] = s
        self.assertEqual(
            [d, c, b],
            list(a))
    def test_setslice_all_replace_reversed_ns1(self):
        """Reordering ns'd children keeps each child's namespaced attributes."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('{ns}a')
        b = SubElement(a, '{ns}b', {'{ns1}a1': 'test'})
        c = SubElement(a, '{ns}c', {'{ns2}a2': 'test'})
        d = SubElement(a, '{ns}d', {'{ns3}a3': 'test'})
        s = [d, c, b]
        a[:] = s
        self.assertEqual(
            [d, c, b],
            list(a))
        self.assertEqual(
            ['{ns}d', '{ns}c', '{ns}b'],
            [ child.tag for child in a ])
        self.assertEqual(
            [['{ns3}a3'], ['{ns2}a2'], ['{ns1}a1']],
            [ list(child.attrib.keys()) for child in a ])
    def test_setslice_all_replace_reversed_ns2(self):
        """Reordering children in distinct namespaces keeps tags and attributes."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('{ns}a')
        b = SubElement(a, '{ns1}b', {'{ns}a1': 'test'})
        c = SubElement(a, '{ns2}c', {'{ns}a2': 'test'})
        d = SubElement(a, '{ns3}d', {'{ns}a3': 'test'})
        s = [d, c, b]
        a[:] = s
        self.assertEqual(
            [d, c, b],
            list(a))
        self.assertEqual(
            ['{ns3}d', '{ns2}c', '{ns1}b'],
            [ child.tag for child in a ])
        self.assertEqual(
            [['{ns}a3'], ['{ns}a2'], ['{ns}a1']],
            [ list(child.attrib.keys()) for child in a ])
    def test_setslice_end(self):
        """Assigning past the end appends; assigning to [:0] prepends."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        e = Element('e')
        f = Element('f')
        g = Element('g')
        h = Element('h')
        s = [e, f]
        a[99:] = s
        self.assertEqual(
            [b, c, e, f],
            list(a))
        s = [g, h]
        a[:0] = s
        self.assertEqual(
            [g, h, b, c, e, f],
            list(a))
    def test_setslice_end_exact(self):
        """Assigning at exactly len(a) appends the new children."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = SubElement(a, 'd')
        e = Element('e')
        f = Element('f')
        g = Element('g')
        s = [e, f, g]
        a[3:] = s
        self.assertEqual(
            [b, c, d, e, f, g],
            list(a))
    def test_setslice_single(self):
        """Single-item slice assignment replaces exactly one child."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        e = Element('e')
        f = Element('f')
        s = [e]
        a[0:1] = s
        self.assertEqual(
            [e, c],
            list(a))
        s = [f]
        a[1:2] = s
        self.assertEqual(
            [e, f],
            list(a))
    def test_setslice_tail(self):
        """Slice assignment carries each new child's tail text into the parent."""
        ElementTree = self.etree.ElementTree
        Element = self.etree.Element
        f = BytesIO(b'<a><b></b>B2<c></c>C2<d></d>D2<e></e>E2</a>')
        doc = ElementTree(file=f)
        a = doc.getroot()
        x = Element('x')
        y = Element('y')
        z = Element('z')
        x.tail = 'X2'
        y.tail = 'Y2'
        z.tail = 'Z2'
        a[1:3] = [x, y, z]
        self.assertXML(
            b'<a><b></b>B2<x></x>X2<y></y>Y2<z></z>Z2<e></e>E2</a>',
            a)
    def test_setslice_negative(self):
        """Slice assignment with a negative stop index replaces the middle child."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = SubElement(a, 'd')
        x = Element('x')
        y = Element('y')
        a[1:-1] = [x, y]
        self.assertEqual(
            [b, x, y, d],
            list(a))
    def test_setslice_negative2(self):
        """An empty negative slice inserts without removing anything."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = SubElement(a, 'd')
        x = Element('x')
        y = Element('y')
        a[1:-2] = [x, y]
        self.assertEqual(
            [b, x, y, c, d],
            list(a))
    def test_setslice_empty(self):
        """a[:] = [...] on a childless element adds the new children."""
        Element = self.etree.Element
        a = Element('a')
        b = Element('b')
        c = Element('c')
        a[:] = [b, c]
        self.assertEqual(
            [b, c],
            list(a))
    def test_tail_elementtree_root(self):
        """Wrapping an element in an ElementTree keeps the element's tail."""
        Element = self.etree.Element
        ElementTree = self.etree.ElementTree
        a = Element('a')
        a.tail = 'A2'
        t = ElementTree(element=a)
        self.assertEqual('A2',
                         a.tail)
    def test_ns_access(self):
        """Parsed prefixed tags are exposed in Clark notation ({uri}local)."""
        ElementTree = self.etree.ElementTree
        ns = 'http://xml.infrae.com/1'
        f = BytesIO(('<x:a xmlns:x="%s"><x:b></x:b></x:a>' % ns).encode('utf-8'))
        t = ElementTree(file=f)
        a = t.getroot()
        self.assertEqual('{%s}a' % ns,
                         a.tag)
        self.assertEqual('{%s}b' % ns,
                         a[0].tag)
    def test_ns_access2(self):
        """Same local name in two namespaces maps to two distinct Clark tags."""
        ElementTree = self.etree.ElementTree
        ns = 'http://xml.infrae.com/1'
        ns2 = 'http://xml.infrae.com/2'
        f = BytesIO(('<x:a xmlns:x="%s" xmlns:y="%s"><x:b></x:b><y:b></y:b></x:a>' % (ns, ns2)).encode('utf-8'))
        t = ElementTree(file=f)
        a = t.getroot()
        self.assertEqual('{%s}a' % ns,
                         a.tag)
        self.assertEqual('{%s}b' % ns,
                         a[0].tag)
        self.assertEqual('{%s}b' % ns2,
                         a[1].tag)
    def test_ns_setting(self):
        """Clark-notation tags set at construction time read back unchanged.

        Tags are checked twice to make sure repeated access is stable.
        """
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        ns = 'http://xml.infrae.com/1'
        ns2 = 'http://xml.infrae.com/2'
        a = Element('{%s}a' % ns)
        b = SubElement(a, '{%s}b' % ns2)
        c = SubElement(a, '{%s}c' % ns)
        self.assertEqual('{%s}a' % ns,
                         a.tag)
        self.assertEqual('{%s}b' % ns2,
                         b.tag)
        self.assertEqual('{%s}c' % ns,
                         c.tag)
        self.assertEqual('{%s}a' % ns,
                         a.tag)
        self.assertEqual('{%s}b' % ns2,
                         b.tag)
        self.assertEqual('{%s}c' % ns,
                         c.tag)
    def test_ns_tag_parse(self):
        """Default and prefixed namespaces both resolve to Clark notation on parse."""
        ElementTree = self.etree.ElementTree
        ns = 'http://xml.infrae.com/1'
        ns2 = 'http://xml.infrae.com/2'
        f = BytesIO(('<a xmlns="%s" xmlns:x="%s"><x:b></x:b><b></b></a>' % (ns, ns2)).encode('utf-8'))
        t = ElementTree(file=f)
        a = t.getroot()
        self.assertEqual('{%s}a' % ns,
                         a.tag)
        self.assertEqual('{%s}b' % ns2,
                         a[0].tag)
        self.assertEqual('{%s}b' % ns,
                         a[1].tag)
    def test_ns_attr(self):
        """Namespaced attributes round-trip; prefix numbering order is impl-defined.

        The two accepted serializations differ only in which namespace gets
        ns0 vs ns1, so both orderings are tried.
        """
        Element = self.etree.Element
        ns = 'http://xml.infrae.com/1'
        ns2 = 'http://xml.infrae.com/2'
        a = Element('a')
        a.set('{%s}foo' % ns, 'Foo')
        a.set('{%s}bar' % ns2, 'Bar')
        self.assertEqual(
            'Foo',
            a.get('{%s}foo' % ns))
        self.assertEqual(
            'Bar',
            a.get('{%s}bar' % ns2))
        try:
            self.assertXML(
                ('<a xmlns:ns0="%s" xmlns:ns1="%s" ns0:foo="Foo" ns1:bar="Bar"></a>' % (ns, ns2)).encode('utf-8'),
                a)
        except AssertionError:
            self.assertXML(
                ('<a xmlns:ns0="%s" xmlns:ns1="%s" ns1:foo="Foo" ns0:bar="Bar"></a>' % (ns2, ns)).encode('utf-8'),
                a)
    def test_ns_move(self):
        """Moving a namespaced element to a new tree keeps its namespace alive.

        Regression test: dropping the originating document used to crash
        because the namespace declaration was not moved with the element.
        """
        Element = self.etree.Element
        one = self.etree.fromstring(
            b'<foo><bar xmlns:ns="http://a.b.c"><ns:baz/></bar></foo>')
        baz = one[0][0]
        two = Element('root')
        two.append(baz)
        # removing the originating document could cause a crash/error before
        # as namespace is not moved along with it
        del one, baz
        self.assertEqual('{http://a.b.c}baz', two[0].tag)
    def test_ns_decl_tostring(self):
        """tostring() of a subtree re-declares the namespace inherited via a prefix."""
        tostring = self.etree.tostring
        root = self.etree.XML(
            b'<foo><bar xmlns:ns="http://a.b.c"><ns:baz/></bar></foo>')
        baz = root[0][0]
        nsdecl = re.findall(b"xmlns(?::[a-z0-9]+)?=[\"']([^\"']+)[\"']",
                            tostring(baz))
        self.assertEqual([b"http://a.b.c"], nsdecl)
    def test_ns_decl_tostring_default(self):
        """tostring() of a subtree re-declares an inherited default namespace."""
        tostring = self.etree.tostring
        root = self.etree.XML(
            b'<foo><bar xmlns="http://a.b.c"><baz/></bar></foo>')
        baz = root[0][0]
        nsdecl = re.findall(b"xmlns(?::[a-z0-9]+)?=[\"']([^\"']+)[\"']",
                            tostring(baz))
        self.assertEqual([b"http://a.b.c"], nsdecl)
    def test_ns_decl_tostring_root(self):
        """tostring() of a subtree re-declares a namespace declared on the root."""
        tostring = self.etree.tostring
        root = self.etree.XML(
            b'<foo xmlns:ns="http://a.b.c"><bar><ns:baz/></bar></foo>')
        baz = root[0][0]
        nsdecl = re.findall(b"xmlns(?::[a-z0-9]+)?=[\"']([^\"']+)[\"']",
                            tostring(baz))
        self.assertEqual([b"http://a.b.c"], nsdecl)
    def test_ns_decl_tostring_element(self):
        """tostring() re-declares the namespace for an API-built (not parsed) subtree."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        root = Element("foo")
        bar = SubElement(root, "{http://a.b.c}bar")
        baz = SubElement(bar, "{http://a.b.c}baz")
        nsdecl = re.findall(b"xmlns(?::[a-z0-9]+)?=[\"']([^\"']+)[\"']",
                            self.etree.tostring(baz))
        self.assertEqual([b"http://a.b.c"], nsdecl)
    def test_attribute_xmlns_move(self):
        """An xml:id attribute survives appending its element to a new parent."""
        Element = self.etree.Element
        root = Element('element')
        subelement = Element('subelement',
                             {"{http://www.w3.org/XML/1998/namespace}id": "foo"})
        self.assertEqual(1, len(subelement.attrib))
        self.assertEqual(
            "foo",
            subelement.get("{http://www.w3.org/XML/1998/namespace}id"))
        root.append(subelement)
        # the attribute must still be present (and unique) after the move
        self.assertEqual(1, len(subelement.attrib))
        self.assertEqual(
            list({"{http://www.w3.org/XML/1998/namespace}id" : "foo"}.items()),
            list(subelement.attrib.items()))
        self.assertEqual(
            "foo",
            subelement.get("{http://www.w3.org/XML/1998/namespace}id"))
    def test_namespaces_after_serialize(self):
        """Serializing a subtree and re-parsing it keeps the element's namespace."""
        parse = self.etree.parse
        tostring = self.etree.tostring
        ns_href = "http://a.b.c"
        one = parse(
            BytesIO(('<foo><bar xmlns:ns="%s"><ns:baz/></bar></foo>' % ns_href).encode('utf-8')))
        baz = one.getroot()[0][0]
        parsed = parse(BytesIO( tostring(baz) )).getroot()
        self.assertEqual('{%s}baz' % ns_href, parsed.tag)
    def test_attribute_namespace_roundtrip(self):
        """A prefixed attribute keeps its prefix through serialize + re-parse,
        even when the same URI is also the default namespace."""
        fromstring = self.etree.fromstring
        tostring = self.etree.tostring
        ns_href = "http://a.b.c"
        xml = '<root xmlns="%s" xmlns:x="%s"><el x:a="test" /></root>' % (
            ns_href, ns_href)
        root = fromstring(xml)
        self.assertEqual('test', root[0].get('{%s}a' % ns_href))
        xml2 = tostring(root)
        self.assertTrue(b':a=' in xml2, xml2)
        root2 = fromstring(xml2)
        self.assertEqual('test', root2[0].get('{%s}a' % ns_href))
    def test_attribute_namespace_roundtrip_replaced(self):
        """Overwriting a prefixed attribute's value keeps its prefix on round-trip."""
        fromstring = self.etree.fromstring
        tostring = self.etree.tostring
        ns_href = "http://a.b.c"
        xml = '<root xmlns="%s" xmlns:x="%s"><el x:a="test" /></root>' % (
            ns_href, ns_href)
        root = fromstring(xml)
        self.assertEqual('test', root[0].get('{%s}a' % ns_href))
        root[0].set('{%s}a' % ns_href, 'TEST')
        xml2 = tostring(root)
        self.assertTrue(b':a=' in xml2, xml2)
        root2 = fromstring(xml2)
        self.assertEqual('TEST', root2[0].get('{%s}a' % ns_href))
    required_versions_ET['test_register_namespace'] = (1,3)
    def test_register_namespace(self):
        """register_namespace() installs a global prefix; 'ns'-style names are reserved."""
        # ET 1.3+
        Element = self.etree.Element
        prefix = 'TESTPREFIX'
        namespace = 'http://seriously.unknown/namespace/URI'
        el = Element('{%s}test' % namespace)
        # before registration, an auto-generated ns0 prefix is used
        self.assertEqual(
            '<ns0:test xmlns:ns0="%s"></ns0:test>' % namespace,
            self._writeElement(el).decode())
        self.etree.register_namespace(prefix, namespace)
        el = Element('{%s}test' % namespace)
        self.assertEqual('<%s:test xmlns:%s="%s"></%s:test>' % (
            prefix, prefix, namespace, prefix),
            self._writeElement(el).decode())
        # prefixes matching the reserved 'ns<number>' pattern are rejected
        self.assertRaises(ValueError, self.etree.register_namespace, 'ns25', namespace)
    def test_tostring(self):
        """tostring() serializes an element with its children."""
        tostring = self.etree.tostring
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        self.assertEqual(b'<a><b></b><c></c></a>',
                         canonicalize(tostring(a)))
    def test_tostring_element(self):
        """tostring() of a non-root element serializes only that subtree."""
        tostring = self.etree.tostring
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = SubElement(c, 'd')
        self.assertEqual(b'<b></b>',
                         canonicalize(tostring(b)))
        self.assertEqual(b'<c><d></d></c>',
                         canonicalize(tostring(c)))
    def test_tostring_element_tail(self):
        """tostring() of an element includes its tail text.

        Both '<b/>' and '<b />' empty-tag spellings are accepted.
        """
        tostring = self.etree.tostring
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        b = SubElement(a, 'b')
        c = SubElement(a, 'c')
        d = SubElement(c, 'd')
        b.tail = 'Foo'
        self.assertTrue(tostring(b) == b'<b/>Foo' or
                        tostring(b) == b'<b />Foo')
    required_versions_ET['test_tostring_method_html'] = (1,3)
    def test_tostring_method_html(self):
        """method="html" serializes void elements (<br>) without a closing tag."""
        tostring = self.etree.tostring
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        html = Element('html')
        body = SubElement(html, 'body')
        p = SubElement(body, 'p')
        p.text = "html"
        SubElement(p, 'br').tail = "test"
        self.assertEqual(b'<html><body><p>html<br>test</p></body></html>',
                         tostring(html, method="html"))
    required_versions_ET['test_tostring_method_text'] = (1,3)
    def test_tostring_method_text(self):
        """method="text" concatenates all text and tail content, no markup."""
        tostring = self.etree.tostring
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        a = Element('a')
        a.text = "A"
        a.tail = "tail"
        b = SubElement(a, 'b')
        b.text = "B"
        b.tail = "TAIL"
        c = SubElement(a, 'c')
        c.text = "C"
        self.assertEqual(b'ABTAILCtail',
                         tostring(a, method="text"))
    def test_iterparse(self):
        """iterparse() yields 'end' events by default; .root is set after exhaustion."""
        iterparse = self.etree.iterparse
        f = BytesIO(b'<a><b></b><c/></a>')
        iterator = iterparse(f)
        # root is not available before parsing has finished
        self.assertEqual(None,
                         iterator.root)
        events = list(iterator)
        root = iterator.root
        self.assertEqual(
            [('end', root[0]), ('end', root[1]), ('end', root)],
            events)
    def test_iterparse_incomplete(self):
        """Pulling a single event from iterparse() works without exhausting it."""
        iterparse = self.etree.iterparse
        f = BytesIO(b'<a><b></b><c/></a>')
        iterator = iterparse(f)
        self.assertEqual(None,
                         iterator.root)
        event, element = next(iter(iterator))
        self.assertEqual('end', event)
        self.assertEqual('b', element.tag)
    def test_iterparse_file(self):
        """iterparse() accepts a file path and produces the same event stream."""
        iterparse = self.etree.iterparse
        iterator = iterparse(fileInTestDir("test.xml"))
        self.assertEqual(None,
                         iterator.root)
        events = list(iterator)
        root = iterator.root
        self.assertEqual(
            [('end', root[0]), ('end', root)],
            events)
    def test_iterparse_start(self):
        """events=('start',) yields only start events, in document order."""
        iterparse = self.etree.iterparse
        f = BytesIO(b'<a><b></b><c/></a>')
        iterator = iterparse(f, events=('start',))
        events = list(iterator)
        root = iterator.root
        self.assertEqual(
            [('start', root), ('start', root[0]), ('start', root[1])],
            events)
    def test_iterparse_start_end(self):
        """events=('start','end') interleaves both event kinds correctly."""
        iterparse = self.etree.iterparse
        f = BytesIO(b'<a><b></b><c/></a>')
        iterator = iterparse(f, events=('start','end'))
        events = list(iterator)
        root = iterator.root
        self.assertEqual(
            [('start', root), ('start', root[0]), ('end', root[0]),
             ('start', root[1]), ('end', root[1]), ('end', root)],
            events)
    def test_iterparse_clear(self):
        """Calling clear() on each 'end' element empties the final tree."""
        iterparse = self.etree.iterparse
        f = BytesIO(b'<a><b></b><c/></a>')
        iterator = iterparse(f)
        for event, elem in iterator:
            elem.clear()
        root = iterator.root
        self.assertEqual(0,
                         len(root))
    def test_iterparse_large(self):
        """iterparse() delivers one event per element for a large document."""
        iterparse = self.etree.iterparse
        CHILD_COUNT = 12345
        f = BytesIO(b'<a>%s</a>' % (b'<b>test</b>' * CHILD_COUNT))
        i = 0
        for key in iterparse(f):
            event, element = key
            i += 1
        # one 'end' event per child plus one for the root
        self.assertEqual(i, CHILD_COUNT + 1)
    def test_iterparse_set_ns_attribute(self):
        """Namespaced attributes can be set on elements during iterparse().

        Also checks the exact ordering of start/end and start-ns/end-ns events.
        """
        iterparse = self.etree.iterparse
        f = BytesIO(b'<a xmlns="http://ns1/"><b><c xmlns="http://ns2/"/></b></a>')
        attr_name = '{http://testns/}bla'
        events = []
        iterator = iterparse(f, events=('start','end','start-ns','end-ns'))
        for event, elem in iterator:
            events.append(event)
            if event == 'start':
                if elem.tag != '{http://ns1/}a':  # skip the root element
                    elem.set(attr_name, 'value')
        self.assertEqual(
            ['start-ns', 'start', 'start', 'start-ns', 'start',
             'end', 'end-ns', 'end', 'end', 'end-ns'],
            events)
        root = iterator.root
        self.assertEqual(
            None,
            root.get(attr_name))
        self.assertEqual(
            'value',
            root[0].get(attr_name))
    def test_iterparse_only_end_ns(self):
        """End-ns events fire at the right points while elements are modified.

        NOTE(review): this body is identical to test_iterparse_set_ns_attribute
        and subscribes to all four event kinds, which does not match the
        'only_end_ns' name — presumably it was meant to use events=('end-ns',);
        verify the intent before changing the assertions.
        """
        iterparse = self.etree.iterparse
        f = BytesIO(b'<a xmlns="http://ns1/"><b><c xmlns="http://ns2/"/></b></a>')
        attr_name = '{http://testns/}bla'
        events = []
        iterator = iterparse(f, events=('start','end','start-ns','end-ns'))
        for event, elem in iterator:
            events.append(event)
            if event == 'start':
                if elem.tag != '{http://ns1/}a':
                    elem.set(attr_name, 'value')
        self.assertEqual(
            ['start-ns', 'start', 'start', 'start-ns', 'start',
             'end', 'end-ns', 'end', 'end', 'end-ns'],
            events)
        root = iterator.root
        self.assertEqual(
            None,
            root.get(attr_name))
        self.assertEqual(
            'value',
            root[0].get(attr_name))
def test_iterparse_move_elements(self):
iterparse = self.etree.iterparse
f = BytesIO(b'<a><b><d/></b><c/></a>')
for event, node in etree.iterparse(f): pass
root = etree.Element('new_root', {})
root[:] = node[:]
self.assertEqual(
['b', 'c'],
[ el.tag for el in root ])
    def test_iterparse_cdata(self):
        """CDATA content appears as plain text during and after iterparse()."""
        tostring = self.etree.tostring
        f = BytesIO(b'<root><![CDATA[test]]></root>')
        context = self.etree.iterparse(f)
        content = [ el.text for event,el in context ]
        self.assertEqual(['test'], content)
        self.assertEqual(b'<root>test</root>',
                         tostring(context.root))
    def test_parse_file(self):
        """parse() accepts a file path."""
        parse = self.etree.parse
        # from file
        tree = parse(fileInTestDir('test.xml'))
        self.assertXML(
            b'<a><b></b></a>',
            tree.getroot())
    def test_parse_file_nonexistent(self):
        """parse() on a missing path raises IOError (OSError)."""
        parse = self.etree.parse
        self.assertRaises(IOError, parse, fileInTestDir('notthere.xml'))
    def test_parse_error_none(self):
        """parse(None) raises TypeError."""
        parse = self.etree.parse
        self.assertRaises(TypeError, parse, None)
    required_versions_ET['test_parse_error'] = (1,3)
    def test_parse_error(self):
        """Malformed XML raises SyntaxError (ParseError subclasses it in ET 1.3+)."""
        # ET < 1.3 raises ExpatError
        parse = self.etree.parse
        f = BytesIO(b'<a><b></c></b></a>')
        self.assertRaises(SyntaxError, parse, f)
        f.close()
    required_versions_ET['test_parse_error_from_file'] = (1,3)
    def test_parse_error_from_file(self):
        """Malformed XML read from a real file raises SyntaxError."""
        parse = self.etree.parse
        # from file
        f = open(fileInTestDir('test_broken.xml'), 'rb')
        self.assertRaises(SyntaxError, parse, f)
        f.close()
    def test_parse_file_object(self):
        """parse() accepts an open binary file object."""
        parse = self.etree.parse
        # from file object
        f = open(fileInTestDir('test.xml'), 'rb')
        tree = parse(f)
        f.close()
        self.assertXML(
            b'<a><b></b></a>',
            tree.getroot())
    def test_parse_stringio(self):
        """parse() accepts an in-memory BytesIO stream."""
        parse = self.etree.parse
        f = BytesIO(b'<a><b></b></a>')
        tree = parse(f)
        f.close()
        self.assertXML(
            b'<a><b></b></a>',
            tree.getroot()
            )
    def test_parse_cdata(self):
        """CDATA sections parse into plain .text and serialize as plain text."""
        tostring = self.etree.tostring
        root = self.etree.XML(b'<root><![CDATA[test]]></root>')
        self.assertEqual('test', root.text)
        self.assertEqual(b'<root>test</root>',
                         tostring(root))
    def test_parse_with_encoding(self):
        """A declared document encoding is honored when parsing."""
        # this can fail in libxml2 <= 2.6.22
        parse = self.etree.parse
        tree = parse(BytesIO(b'<?xml version="1.0" encoding="ascii"?><html/>'))
        self.assertXML(b'<html></html>',
                       tree.getroot())
    def test_encoding(self):
        """Non-ASCII text serializes correctly when writing as UTF-8."""
        Element = self.etree.Element
        a = Element('a')
        a.text = 'Søk på nettet'
        self.assertXML(
            '<a>Søk på nettet</a>'.encode(),
            a, 'utf-8')
    def test_encoding_exact(self):
        """ElementTree.write(encoding='utf-8') produces the exact UTF-8 bytes."""
        ElementTree = self.etree.ElementTree
        Element = self.etree.Element
        a = Element('a')
        a.text = 'Søk på nettet'
        f = BytesIO()
        tree = ElementTree(element=a)
        tree.write(f, encoding='utf-8')
        self.assertEqual('<a>Søk på nettet</a>'.encode(),
                         f.getvalue().replace(b'\n',b''))
    def test_parse_file_encoding(self):
        """A file containing non-ASCII text parses and re-serializes as UTF-8."""
        parse = self.etree.parse
        # from file
        tree = parse(fileInTestDir('test-string.xml'))
        self.assertXML(
            '<a>Søk på nettet</a>'.encode(),
            tree.getroot(), 'UTF-8')
    def test_parse_file_object_encoding(self):
        """Non-ASCII content parses correctly from an open binary file object."""
        parse = self.etree.parse
        # from file object
        f = open(fileInTestDir('test-string.xml'), 'rb')
        tree = parse(f)
        f.close()
        self.assertXML(
            '<a>Søk på nettet</a>'.encode(),
            tree.getroot(), 'UTF-8')
    def test_encoding_8bit_latin1(self):
        """write(encoding='iso-8859-1') declares and uses latin-1 byte output."""
        ElementTree = self.etree.ElementTree
        Element = self.etree.Element
        a = Element('a')
        a.text = 'Søk på nettet'
        f = BytesIO()
        tree = ElementTree(element=a)
        tree.write(f, encoding='iso-8859-1')
        result = f.getvalue()
        declaration = b"<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>"
        self.assertEncodingDeclaration(result, b'iso-8859-1')
        # compare only the document body, after the XML declaration
        result = result.split(b'?>', 1)[-1].replace(b'\n',b'')
        self.assertEqual('<a>Søk på nettet</a>'.encode('iso-8859-1'),
                         result)
    required_versions_ET['test_parse_encoding_8bit_explicit'] = (1,3)
    def test_parse_encoding_8bit_explicit(self):
        """An explicit parser encoding decodes latin-1 input without a declaration.

        Without it, the undeclared latin-1 bytes fail to parse as UTF-8.
        """
        XMLParser = self.XMLParser
        text = 'Søk på nettet'
        xml_latin1 = ('<a>%s</a>' % text).encode('iso-8859-1')
        self.assertRaises(self.etree.ParseError,
                          self.etree.parse,
                          BytesIO(xml_latin1))
        tree = self.etree.parse(BytesIO(xml_latin1),
                                XMLParser(encoding="iso-8859-1"))
        a = tree.getroot()
        self.assertEqual(a.text, text)
    required_versions_ET['test_parse_encoding_8bit_override'] = (1,3)
    def test_parse_encoding_8bit_override(self):
        """An explicit parser encoding overrides a wrong in-document declaration."""
        XMLParser = self.XMLParser
        text = 'Søk på nettet'
        wrong_declaration = "<?xml version='1.0' encoding='UTF-8'?>"
        xml_latin1 = ('%s<a>%s</a>' % (wrong_declaration, text)
                      ).encode('iso-8859-1')
        self.assertRaises(self.etree.ParseError,
                          self.etree.parse,
                          BytesIO(xml_latin1))
        tree = self.etree.parse(BytesIO(xml_latin1),
                                XMLParser(encoding="iso-8859-1"))
        a = tree.getroot()
        self.assertEqual(a.text, text)
    def _test_wrong_unicode_encoding(self):
        """(Disabled) An encoding declaration inside a unicode string should error.

        Leading underscore keeps this out of the test run; kept for reference.
        """
        # raise error on wrong encoding declaration in unicode strings
        XML = self.etree.XML
        test_utf = ('<?xml version="1.0" encoding="iso-8859-1"?>' +
                    '<a>Søk på nettet</a>')
        self.assertRaises(SyntaxError, XML, test_utf)
    def test_encoding_write_default_encoding(self):
        """write() without an encoding escapes non-ASCII as character references."""
        ElementTree = self.etree.ElementTree
        Element = self.etree.Element
        a = Element('a')
        a.text = 'Søk på nettet'
        f = BytesIO()
        tree = ElementTree(element=a)
        tree.write(f)
        data = f.getvalue().replace(b'\n',b'')
        self.assertEqual(
            '<a>Søk på nettet</a>'.encode('ASCII', 'xmlcharrefreplace'),
            data)
    def test_encoding_tostring(self):
        """tostring(encoding='utf-8') emits raw UTF-8 bytes for non-ASCII text."""
        Element = self.etree.Element
        tostring = self.etree.tostring
        a = Element('a')
        a.text = 'Søk på nettet'
        self.assertEqual('<a>Søk på nettet</a>'.encode(),
                         tostring(a, encoding='utf-8'))
    def test_encoding_tostring_unknown(self):
        """tostring() with an unknown encoding name raises LookupError."""
        Element = self.etree.Element
        tostring = self.etree.tostring
        a = Element('a')
        a.text = 'Søk på nettet'
        self.assertRaises(LookupError, tostring, a,
                          encoding='Invalid Encoding')
    def test_encoding_tostring_sub(self):
        """tostring(encoding='utf-8') of a subelement emits UTF-8 correctly."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        tostring = self.etree.tostring
        a = Element('a')
        b = SubElement(a, 'b')
        b.text = 'Søk på nettet'
        self.assertEqual('<b>Søk på nettet</b>'.encode(),
                         tostring(b, encoding='utf-8'))
    def test_encoding_tostring_sub_tail(self):
        """tostring() of a subelement also encodes its non-ASCII tail."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        tostring = self.etree.tostring
        a = Element('a')
        b = SubElement(a, 'b')
        b.text = 'Søk på nettet'
        b.tail = 'Søk'
        self.assertEqual('<b>Søk på nettet</b>Søk'.encode(),
                         tostring(b, encoding='utf-8'))
    def test_encoding_tostring_default_encoding(self):
        """Default tostring() escapes non-ASCII text as character references."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        tostring = self.etree.tostring
        a = Element('a')
        a.text = 'Søk på nettet'
        expected = b'<a>S&#248;k p&#229; nettet</a>'
        self.assertEqual(
            expected,
            tostring(a))
    def test_encoding_sub_tostring_default_encoding(self):
        """Default tostring() of a subelement escapes non-ASCII as char references."""
        Element = self.etree.Element
        SubElement = self.etree.SubElement
        tostring = self.etree.tostring
        a = Element('a')
        b = SubElement(a, 'b')
        b.text = 'Søk på nettet'
        expected = b'<b>S&#248;k p&#229; nettet</b>'
        self.assertEqual(
            expected,
            tostring(b))
    def test_encoding_8bit_xml(self):
        """A latin-1 document with a matching declaration decodes to unicode text."""
        utext = 'Søk på nettet'
        uxml = '<p>%s</p>' % utext
        prologue = b'<?xml version="1.0" encoding="iso-8859-1" ?>'
        isoxml = prologue + uxml.encode('iso-8859-1')
        tree = self.etree.XML(isoxml)
        self.assertEqual(utext, tree.text)
def test_encoding_utf8_bom(self):
utext = 'Søk på nettet'
uxml = ('<?xml version="1.0" encoding="UTF-8"?>' +
'<p>%s</p>' % utext)
bom = b'\\xEF\\xBB\\xBF'.decode("unicode_escape").encode("latin1")
xml = bom + uxml.encode("utf-8")
tree = etree.XML(xml)
self.assertEqual(utext, tree.text)
    def test_encoding_8bit_parse_stringio(self):
        """parse() of a latin-1 BytesIO stream decodes per the declaration."""
        utext = 'Søk på nettet'
        uxml = '<p>%s</p>' % utext
        prologue = b'<?xml version="1.0" encoding="iso-8859-1" ?>'
        isoxml = prologue + uxml.encode('iso-8859-1')
        el = self.etree.parse(BytesIO(isoxml)).getroot()
        self.assertEqual(utext, el.text)
def test_deepcopy_elementtree(self):
Element = self.etree.Element
ElementTree = self.etree.ElementTree
a = Element('a')
a.text = "Foo"
atree = ElementTree(a)
btree = copy.deepcopy(atree)
self.assertEqual("Foo", atree.getroot().text)
self.assertEqual("Foo", btree.getroot().text)
self.assertFalse(btree is atree)
self.assertFalse(btree.getroot() is atree.getroot())
def test_deepcopy(self):
Element = self.etree.Element
a = Element('a')
a.text = 'Foo'
b = copy.deepcopy(a)
self.assertEqual('Foo', b.text)
b.text = 'Bar'
self.assertEqual('Bar', b.text)
self.assertEqual('Foo', a.text)
del a
self.assertEqual('Bar', b.text)
def test_deepcopy_tail(self):
Element = self.etree.Element
a = Element('a')
a.tail = 'Foo'
b = copy.deepcopy(a)
self.assertEqual('Foo', b.tail)
b.tail = 'Bar'
self.assertEqual('Bar', b.tail)
self.assertEqual('Foo', a.tail)
del a
self.assertEqual('Bar', b.tail)
def test_deepcopy_subelement(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
a = SubElement(root, 'a')
a.text = 'FooText'
a.tail = 'FooTail'
b = copy.deepcopy(a)
self.assertEqual('FooText', b.text)
self.assertEqual('FooTail', b.tail)
b.text = 'BarText'
b.tail = 'BarTail'
self.assertEqual('BarTail', b.tail)
self.assertEqual('FooTail', a.tail)
self.assertEqual('BarText', b.text)
self.assertEqual('FooText', a.text)
del a
self.assertEqual('BarTail', b.tail)
self.assertEqual('BarText', b.text)
def test_deepcopy_namespaces(self):
root = self.etree.XML(b'''<doc xmlns="dns" xmlns:t="tns">
<parent><node t:foo="bar" /></parent>
</doc>''')
self.assertEqual(
root[0][0].get('{tns}foo'),
copy.deepcopy(root[0])[0].get('{tns}foo') )
self.assertEqual(
root[0][0].get('{tns}foo'),
copy.deepcopy(root[0][0]).get('{tns}foo') )
def test_deepcopy_append(self):
# previously caused a crash
Element = self.etree.Element
tostring = self.etree.tostring
a = Element('a')
b = copy.deepcopy(a)
a.append( Element('C') )
b.append( Element('X') )
self.assertEqual(b'<a><C/></a>',
tostring(a).replace(b' ', b''))
self.assertEqual(b'<a><X/></a>',
tostring(b).replace(b' ', b''))
def test_deepcopy_comment(self):
# previously caused a crash
# not supported by ET < 1.3!
Comment = self.etree.Comment
a = Comment("ONE")
b = copy.deepcopy(a)
b.text = "ANOTHER"
self.assertEqual('ONE', a.text)
self.assertEqual('ANOTHER', b.text)
def test_shallowcopy(self):
Element = self.etree.Element
a = Element('a')
a.text = 'Foo'
b = copy.copy(a)
self.assertEqual('Foo', b.text)
b.text = 'Bar'
self.assertEqual('Bar', b.text)
self.assertEqual('Foo', a.text)
# XXX ElementTree will share nodes, but lxml.etree won't..
def test_shallowcopy_elementtree(self):
Element = self.etree.Element
ElementTree = self.etree.ElementTree
a = Element('a')
a.text = 'Foo'
atree = ElementTree(a)
btree = copy.copy(atree)
self.assertFalse(btree is atree)
self.assertTrue(btree.getroot() is atree.getroot())
self.assertEqual('Foo', atree.getroot().text)
def _test_element_boolean(self):
# deprecated as of ET 1.3/lxml 2.0
etree = self.etree
e = etree.Element('foo')
self.assertEqual(False, bool(e))
etree.SubElement(e, 'bar')
self.assertEqual(True, bool(e))
e = etree.Element('foo')
e.text = 'hey'
self.assertEqual(False, bool(e))
e = etree.Element('foo')
e.tail = 'bar'
self.assertEqual(False, bool(e))
e = etree.Element('foo')
e.set('bar', 'Bar')
self.assertEqual(False, bool(e))
def test_multiple_elementrees(self):
etree = self.etree
a = etree.Element('a')
b = etree.SubElement(a, 'b')
t = etree.ElementTree(a)
self.assertEqual(self._rootstring(t), b'<a><b/></a>')
t1 = etree.ElementTree(a)
self.assertEqual(self._rootstring(t1), b'<a><b/></a>')
self.assertEqual(self._rootstring(t), b'<a><b/></a>')
t2 = etree.ElementTree(b)
self.assertEqual(self._rootstring(t2), b'<b/>')
self.assertEqual(self._rootstring(t1), b'<a><b/></a>')
self.assertEqual(self._rootstring(t), b'<a><b/></a>')
def test_qname(self):
etree = self.etree
qname = etree.QName('myns', 'a')
a1 = etree.Element(qname)
a2 = etree.SubElement(a1, qname)
self.assertEqual(a1.tag, "{myns}a")
self.assertEqual(a2.tag, "{myns}a")
def test_qname_cmp(self):
etree = self.etree
qname1 = etree.QName('myns', 'a')
qname2 = etree.QName('myns', 'a')
self.assertEqual(qname1, "{myns}a")
self.assertEqual("{myns}a", qname2)
self.assertEqual(qname1, qname1)
self.assertEqual(qname1, qname2)
def test_qname_attribute_getset(self):
etree = self.etree
qname = etree.QName('myns', 'a')
a = etree.Element(qname)
a.set(qname, "value")
self.assertEqual(a.get(qname), "value")
self.assertEqual(a.get("{myns}a"), "value")
def test_qname_attrib(self):
etree = self.etree
qname = etree.QName('myns', 'a')
a = etree.Element(qname)
a.attrib[qname] = "value"
self.assertEqual(a.attrib[qname], "value")
self.assertEqual(a.attrib.get(qname), "value")
self.assertEqual(a.attrib["{myns}a"], "value")
self.assertEqual(a.attrib.get("{myns}a"), "value")
def test_qname_attribute_resolve(self):
etree = self.etree
qname = etree.QName('http://myns', 'a')
a = etree.Element(qname)
a.set(qname, qname)
self.assertXML(
b'<ns0:a xmlns:ns0="http://myns" ns0:a="ns0:a"></ns0:a>',
a)
def test_qname_attribute_resolve_new(self):
etree = self.etree
qname = etree.QName('http://myns', 'a')
a = etree.Element('a')
a.set('a', qname)
self.assertXML(
b'<a xmlns:ns0="http://myns" a="ns0:a"></a>',
a)
def test_qname_attrib_resolve(self):
etree = self.etree
qname = etree.QName('http://myns', 'a')
a = etree.Element(qname)
a.attrib[qname] = qname
self.assertXML(
b'<ns0:a xmlns:ns0="http://myns" ns0:a="ns0:a"></ns0:a>',
a)
def test_parser_version(self):
etree = self.etree
parser = etree.XMLParser()
if hasattr(parser, "version"):
# ElementTree 1.3+, cET
self.assertTrue(re.match("[^ ]+ [0-9.]+", parser.version))
# feed parser interface
def test_feed_parser_bytes(self):
parser = self.XMLParser()
parser.feed(b'<?xml version=')
parser.feed(b'"1.0"?><ro')
parser.feed(b'ot><')
parser.feed(b'a test="works"/')
parser.feed(b'></root')
parser.feed(b'>')
root = parser.close()
self.assertEqual(root.tag, "root")
self.assertEqual(root[0].tag, "a")
self.assertEqual(root[0].get("test"), "works")
def test_feed_parser_unicode_ascii(self):
parser = self.XMLParser()
parser.feed('<?xml version=')
parser.feed('"1.0"?><ro')
parser.feed('ot><')
parser.feed('a test="works"/')
parser.feed('></root')
parser.feed('>')
root = parser.close()
self.assertEqual(root.tag, "root")
self.assertEqual(root[0].tag, "a")
self.assertEqual(root[0].get("test"), "works")
@et_needs_pyversion(3)
def test_feed_parser_unicode_astral(self):
parser = self.XMLParser()
astral_chunk = '-- \U00010143 --' # astral (4 bytes/chr)
latin1_chunk = '-- \xf8 --' # Latin1 (1 byte/chr)
parser.feed('<ro') # ASCII (1 byte/chr)
parser.feed('ot><')
parser.feed('a test="w\N{DIAMETER SIGN}rks">') # BMP (2 bytes/chr)
parser.feed(astral_chunk)
parser.feed(latin1_chunk)
parser.feed('</a></root')
parser.feed('>')
root = parser.close()
self.assertEqual(root.tag, "root")
self.assertEqual(root[0].tag, "a")
self.assertEqual(root[0].get("test"), "w\N{DIAMETER SIGN}rks")
self.assertEqual(root[0].text, astral_chunk + latin1_chunk)
def test_feed_parser_unicode_astral_large(self):
parser = self.XMLParser()
astral_chunk = '-- \U00010143 --' * (2 ** 16) # astral (4 bytes/chr)
latin1_chunk = '-- \xf8 --' # Latin1 (1 byte/chr)
parser.feed('<ro')
parser.feed('ot><') # ASCII (1 byte/chr)
parser.feed('a test="w\N{DIAMETER SIGN}rks">') # BMP (2 bytes/chr)
parser.feed(astral_chunk)
parser.feed((astral_chunk + "</a> <a>" + astral_chunk) * 16)
parser.feed(latin1_chunk)
parser.feed('</a></root')
parser.feed('>')
root = parser.close()
self.assertEqual(root.tag, "root")
self.assertEqual(root[0].get("test"), "w\N{DIAMETER SIGN}rks")
for child in root[:-1]:
self.assertEqual(child.tag, "a")
self.assertEqual(child.text, astral_chunk * 2)
self.assertEqual(root[-1].tag, "a")
self.assertEqual(root[-1].text, astral_chunk + latin1_chunk)
required_versions_ET['test_feed_parser_error_close_empty'] = (1,3)
def test_feed_parser_error_close_empty(self):
ParseError = self.etree.ParseError
parser = self.XMLParser()
self.assertRaises(ParseError, parser.close)
required_versions_ET['test_feed_parser_error_close_incomplete'] = (1,3)
def test_feed_parser_error_close_incomplete(self):
ParseError = self.etree.ParseError
parser = self.XMLParser()
parser.feed('<?xml version=')
parser.feed('"1.0"?><ro')
self.assertRaises(ParseError, parser.close)
required_versions_ET['test_feed_parser_error_broken'] = (1,3)
def test_feed_parser_error_broken(self):
ParseError = self.etree.ParseError
parser = self.XMLParser()
parser.feed('<?xml version=')
parser.feed('"1.0"?><ro')
try:
parser.feed('<><><><><><><')
except ParseError:
# can raise, but not required before close()
pass
self.assertRaises(ParseError, parser.close)
required_versions_ET['test_feed_parser_error_position'] = (1,3)
def test_feed_parser_error_position(self):
ParseError = self.etree.ParseError
parser = self.XMLParser()
try:
parser.close()
except ParseError:
e = sys.exc_info()[1]
self.assertNotEqual(None, e.code)
self.assertNotEqual(0, e.code)
self.assertTrue(isinstance(e.position, tuple))
self.assertTrue(e.position >= (0, 0))
# parser target interface
required_versions_ET['test_parser_target_property'] = (1,3)
def test_parser_target_property(self):
class Target:
pass
target = Target()
parser = self.XMLParser(target=target)
self.assertEqual(target, parser.target)
def test_parser_target_tag(self):
assertEqual = self.assertEqual
assertFalse = self.assertFalse
events = []
class Target:
def start(self, tag, attrib):
events.append("start")
assertFalse(attrib)
assertEqual("TAG", tag)
def end(self, tag):
events.append("end")
assertEqual("TAG", tag)
def close(self):
return "DONE"
parser = self.XMLParser(target=Target())
parser.feed("<TAG/>")
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual(["start", "end"], events)
def test_parser_target_error_in_start(self):
assertEqual = self.assertEqual
events = []
class Target:
def start(self, tag, attrib):
events.append("start")
assertEqual("TAG", tag)
raise ValueError("TEST")
def end(self, tag):
events.append("end")
assertEqual("TAG", tag)
def close(self):
return "DONE"
parser = self.XMLParser(target=Target())
try:
parser.feed("<TAG/>")
except ValueError:
self.assertTrue('TEST' in str(sys.exc_info()[1]))
else:
self.assertTrue(False)
if 'lxml' in self.etree.__name__:
self.assertEqual(["start"], events)
else:
# cElementTree calls end() as well
self.assertTrue("start" in events)
def test_parser_target_error_in_end(self):
assertEqual = self.assertEqual
events = []
class Target:
def start(self, tag, attrib):
events.append("start")
assertEqual("TAG", tag)
def end(self, tag):
events.append("end")
assertEqual("TAG", tag)
raise ValueError("TEST")
def close(self):
return "DONE"
parser = self.XMLParser(target=Target())
try:
parser.feed("<TAG/>")
except ValueError:
self.assertTrue('TEST' in str(sys.exc_info()[1]))
else:
self.assertTrue(False)
self.assertEqual(["start", "end"], events)
def test_parser_target_error_in_close(self):
assertEqual = self.assertEqual
events = []
class Target:
def start(self, tag, attrib):
events.append("start")
assertEqual("TAG", tag)
def end(self, tag):
events.append("end")
assertEqual("TAG", tag)
def close(self):
raise ValueError("TEST")
parser = self.XMLParser(target=Target())
try:
parser.feed("<TAG/>")
parser.close()
except ValueError:
self.assertTrue('TEST' in str(sys.exc_info()[1]))
else:
self.assertTrue(False)
self.assertEqual(["start", "end"], events)
def test_parser_target_error_in_start_and_close(self):
assertEqual = self.assertEqual
events = []
class Target:
def start(self, tag, attrib):
events.append("start")
assertEqual("TAG", tag)
raise IndexError("TEST-IE")
def end(self, tag):
events.append("end")
assertEqual("TAG", tag)
def close(self):
raise ValueError("TEST-VE")
parser = self.XMLParser(target=Target())
try:
parser.feed("<TAG/>")
parser.close()
except IndexError:
if 'lxml' in self.etree.__name__:
# we try not to swallow the initial exception in Py2
self.assertTrue(sys.version_info[0] < 3)
self.assertTrue('TEST-IE' in str(sys.exc_info()[1]))
except ValueError:
if 'lxml' in self.etree.__name__:
self.assertTrue(sys.version_info[0] >= 3)
self.assertTrue('TEST-VE' in str(sys.exc_info()[1]))
else:
self.assertTrue(False)
if 'lxml' in self.etree.__name__:
self.assertEqual(["start"], events)
else:
# cElementTree calls end() as well
self.assertTrue("start" in events)
def test_elementtree_parser_target(self):
assertEqual = self.assertEqual
assertFalse = self.assertFalse
Element = self.etree.Element
events = []
class Target:
def start(self, tag, attrib):
events.append("start")
assertFalse(attrib)
assertEqual("TAG", tag)
def end(self, tag):
events.append("end")
assertEqual("TAG", tag)
def close(self):
return Element("DONE")
parser = self.XMLParser(target=Target())
tree = self.etree.ElementTree()
tree.parse(BytesIO(b"<TAG/>"), parser=parser)
self.assertEqual("DONE", tree.getroot().tag)
self.assertEqual(["start", "end"], events)
def test_parser_target_attrib(self):
assertEqual = self.assertEqual
events = []
class Target:
def start(self, tag, attrib):
events.append("start-" + tag)
for name, value in attrib.items():
assertEqual(tag + name, value)
def end(self, tag):
events.append("end-" + tag)
def close(self):
return "DONE"
parser = self.XMLParser(target=Target())
parser.feed('<root a="roota" b="rootb"><sub c="subc"/></root>')
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual(["start-root", "start-sub", "end-sub", "end-root"],
events)
def test_parser_target_data(self):
events = []
class Target:
def start(self, tag, attrib):
events.append("start-" + tag)
def end(self, tag):
events.append("end-" + tag)
def data(self, data):
events.append("data-" + data)
def close(self):
return "DONE"
parser = self.XMLParser(target=Target())
parser.feed('<root>A<sub/>B</root>')
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual(["start-root", "data-A", "start-sub",
"end-sub", "data-B", "end-root"],
events)
def test_parser_target_entity(self):
events = []
class Target:
def __init__(self):
self._data = []
def _flush_data(self):
if self._data:
events.append("data-" + ''.join(self._data))
del self._data[:]
def start(self, tag, attrib):
self._flush_data()
events.append("start-" + tag)
def end(self, tag):
self._flush_data()
events.append("end-" + tag)
def data(self, data):
self._data.append(data)
def close(self):
self._flush_data()
return "DONE"
parser = self.XMLParser(target=Target())
dtd = '''
<!DOCTYPE root [
<!ELEMENT root (sub*)>
<!ELEMENT sub (#PCDATA)>
<!ENTITY ent "an entity">
]>
'''
parser.feed(dtd+'<root><sub/><sub>this is &ent;</sub><sub/></root>')
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual(["start-root", "start-sub", "end-sub", "start-sub",
"data-this is an entity",
"end-sub", "start-sub", "end-sub", "end-root"],
events)
required_versions_ET['test_parser_target_entity_unknown'] = (1,3)
def test_parser_target_entity_unknown(self):
events = []
class Target:
def __init__(self):
self._data = []
def _flush_data(self):
if self._data:
events.append("data-" + ''.join(self._data))
del self._data[:]
def start(self, tag, attrib):
self._flush_data()
events.append("start-" + tag)
def end(self, tag):
self._flush_data()
events.append("end-" + tag)
def data(self, data):
self._data.append(data)
def close(self):
self._flush_data()
return "DONE"
parser = self.XMLParser(target=Target())
def feed():
parser.feed('<root><sub/><sub>some &ent;</sub><sub/></root>')
parser.close()
self.assertRaises(self.etree.ParseError, feed)
@et_needs_pyversion(3, 8, 0, 'alpha', 4)
def test_parser_target_start_end_ns(self):
class Builder(list):
def start(self, tag, attrib):
self.append(("start", tag))
def end(self, tag):
self.append(("end", tag))
def data(self, text):
pass
def pi(self, target, data):
self.append(("pi", target, data))
def comment(self, data):
self.append(("comment", data))
def start_ns(self, prefix, uri):
self.append(("start-ns", prefix, uri))
def end_ns(self, prefix):
self.append(("end-ns", prefix))
builder = Builder()
parser = self.etree.XMLParser(target=builder)
parser.feed(textwrap.dedent("""\
<?pi data?>
<!-- comment -->
<root xmlns='namespace'>
<element key='value'>text</element>
<element>text</element>tail
<empty-element/>
</root>
"""))
self.assertEqual(builder, [
('pi', 'pi', 'data'),
('comment', ' comment '),
('start-ns', '', 'namespace'),
('start', '{namespace}root'),
('start', '{namespace}element'),
('end', '{namespace}element'),
('start', '{namespace}element'),
('end', '{namespace}element'),
('start', '{namespace}empty-element'),
('end', '{namespace}empty-element'),
('end', '{namespace}root'),
('end-ns', ''),
])
@et_needs_pyversion(3, 8, 0, 'alpha', 4)
def test_parser_target_end_ns(self):
class Builder(list):
def end_ns(self, prefix):
self.append(("end-ns", prefix))
builder = Builder()
parser = self.etree.XMLParser(target=builder)
parser.feed(textwrap.dedent("""\
<?pi data?>
<!-- comment -->
<root xmlns='namespace' xmlns:p='pns'>
<element key='value'>text</element>
<p:element>text</p:element>tail
<empty-element/>
</root>
"""))
self.assertEqual(builder, [
('end-ns', 'p'),
('end-ns', ''),
])
def test_treebuilder(self):
builder = self.etree.TreeBuilder()
el = builder.start("root", {'a':'A', 'b':'B'})
self.assertEqual("root", el.tag)
self.assertEqual({'a':'A', 'b':'B'}, el.attrib)
builder.data("ROOTTEXT")
el = builder.start("child", {'x':'X', 'y':'Y'})
self.assertEqual("child", el.tag)
self.assertEqual({'x':'X', 'y':'Y'}, el.attrib)
builder.data("CHILDTEXT")
el = builder.end("child")
self.assertEqual("child", el.tag)
self.assertEqual({'x':'X', 'y':'Y'}, el.attrib)
self.assertEqual("CHILDTEXT", el.text)
self.assertEqual(None, el.tail)
builder.data("CHILDTAIL")
root = builder.end("root")
self.assertEqual("root", root.tag)
self.assertEqual("ROOTTEXT", root.text)
self.assertEqual("CHILDTEXT", root[0].text)
self.assertEqual("CHILDTAIL", root[0].tail)
def test_treebuilder_target(self):
parser = self.XMLParser(target=self.etree.TreeBuilder())
parser.feed('<root>ROOTTEXT<child>CHILDTEXT</child>CHILDTAIL</root>')
root = parser.close()
self.assertEqual("root", root.tag)
self.assertEqual("ROOTTEXT", root.text)
self.assertEqual("CHILDTEXT", root[0].text)
self.assertEqual("CHILDTAIL", root[0].tail)
@et_needs_pyversion(3, 8, 0, 'alpha', 4)
def test_treebuilder_comment(self):
ET = self.etree
b = ET.TreeBuilder()
self.assertEqual(b.comment('ctext').tag, ET.Comment)
self.assertEqual(b.comment('ctext').text, 'ctext')
b = ET.TreeBuilder(comment_factory=ET.Comment)
self.assertEqual(b.comment('ctext').tag, ET.Comment)
self.assertEqual(b.comment('ctext').text, 'ctext')
#b = ET.TreeBuilder(comment_factory=len)
#self.assertEqual(b.comment('ctext'), len('ctext'))
@et_needs_pyversion(3, 8, 0, 'alpha', 4)
def test_treebuilder_pi(self):
ET = self.etree
is_lxml = ET.__name__ == 'lxml.etree'
b = ET.TreeBuilder()
self.assertEqual(b.pi('target', None).tag, ET.PI)
if is_lxml:
self.assertEqual(b.pi('target', None).target, 'target')
else:
self.assertEqual(b.pi('target', None).text, 'target')
b = ET.TreeBuilder(pi_factory=ET.PI)
self.assertEqual(b.pi('target').tag, ET.PI)
if is_lxml:
self.assertEqual(b.pi('target').target, "target")
else:
self.assertEqual(b.pi('target').text, "target")
self.assertEqual(b.pi('pitarget', ' text ').tag, ET.PI)
if is_lxml:
self.assertEqual(b.pi('pitarget', ' text ').target, "pitarget")
self.assertEqual(b.pi('pitarget', ' text ').text, " text ")
else:
self.assertEqual(b.pi('pitarget', ' text ').text, "pitarget text ")
#b = ET.TreeBuilder(pi_factory=lambda target, text: (len(target), text))
#self.assertEqual(b.pi('target'), (len('target'), None))
#self.assertEqual(b.pi('pitarget', ' text '), (len('pitarget'), ' text '))
def test_late_tail(self):
# Issue #37399: The tail of an ignored comment could overwrite the text before it.
ET = self.etree
class TreeBuilderSubclass(ET.TreeBuilder):
pass
if ET.__name__ == 'lxml.etree':
def assert_content(a):
self.assertEqual(a.text, "text")
self.assertEqual(a[0].tail, "tail")
else:
def assert_content(a):
self.assertEqual(a.text, "texttail")
xml = "<a>text<!-- comment -->tail</a>"
a = ET.fromstring(xml)
assert_content(a)
parser = ET.XMLParser(target=TreeBuilderSubclass())
parser.feed(xml)
a = parser.close()
assert_content(a)
xml = "<a>text<?pi data?>tail</a>"
a = ET.fromstring(xml)
assert_content(a)
xml = "<a>text<?pi data?>tail</a>"
parser = ET.XMLParser(target=TreeBuilderSubclass())
parser.feed(xml)
a = parser.close()
assert_content(a)
@et_needs_pyversion(3, 8, 0, 'alpha', 4)
def test_late_tail_mix_pi_comments(self):
# Issue #37399: The tail of an ignored comment could overwrite the text before it.
# Test appending tails to comments/pis.
ET = self.etree
class TreeBuilderSubclass(ET.TreeBuilder):
pass
xml = "<a>text<?pi1?> <!-- comment -->\n<?pi2?>tail</a>"
parser = ET.XMLParser(target=ET.TreeBuilder(insert_comments=True, insert_pis=False))
parser.feed(xml)
a = parser.close()
self.assertEqual(a[0].text, ' comment ')
self.assertEqual(a[0].tail, '\ntail')
self.assertEqual(a.text, "text ")
parser = ET.XMLParser(target=TreeBuilderSubclass(insert_comments=True, insert_pis=False))
parser.feed(xml)
a = parser.close()
self.assertEqual(a[0].text, ' comment ')
self.assertEqual(a[0].tail, '\ntail')
self.assertEqual(a.text, "text ")
xml = "<a>text<!-- comment -->\n<?pi data?>tail</a>"
parser = ET.XMLParser(target=ET.TreeBuilder(insert_pis=True, insert_comments=False))
parser.feed(xml)
a = parser.close()
self.assertEqual(a[0].text[-4:], 'data')
self.assertEqual(a[0].tail, 'tail')
self.assertEqual(a.text, "text\n")
parser = ET.XMLParser(target=TreeBuilderSubclass(insert_pis=True, insert_comments=False))
parser.feed(xml)
a = parser.close()
self.assertEqual(a[0].text[-4:], 'data')
self.assertEqual(a[0].tail, 'tail')
self.assertEqual(a.text, "text\n")
# helper methods
def _writeElement(self, element, encoding='us-ascii'):
"""Write out element for comparison.
"""
data = self.etree.tostring(element, encoding=encoding)
return canonicalize(data)
def _writeElementFile(self, element, encoding='us-ascii'):
"""Write out element for comparison, using real file.
"""
ElementTree = self.etree.ElementTree
with tmpfile() as filename:
with open(filename, 'wb') as f:
tree = ElementTree(element=element)
tree.write(f, encoding=encoding)
with open(filename, 'rb') as f:
data = f.read()
return canonicalize(data)
def assertXML(self, expected, element, encoding='us-ascii'):
"""Writes element out and checks whether it is expected.
Does this two ways; once using BytesIO, once using a real file.
"""
if isinstance(expected, str):
expected = expected.encode(encoding)
self.assertEqual(expected, self._writeElement(element, encoding))
self.assertEqual(expected, self._writeElementFile(element, encoding))
def assertEncodingDeclaration(self, result, encoding):
"Checks if the result XML byte string specifies the encoding."
enc_re = r"<\?xml[^>]+ encoding=[\"']([^\"']+)[\"']"
if isinstance(result, str):
has_encoding = re.compile(enc_re).match
else:
has_encoding = re.compile(enc_re.encode('ascii')).match
self.assertTrue(has_encoding(result))
result_encoding = has_encoding(result).group(1)
self.assertEqual(result_encoding.upper(), encoding.upper())
def _rootstring(self, tree):
return self.etree.tostring(tree.getroot()).replace(
b' ', b'').replace(b'\n', b'')
def _check_element_tree(self, tree):
self._check_element(tree.getroot())
def _check_element(self, element):
self.assertTrue(hasattr(element, 'tag'))
self.assertTrue(hasattr(element, 'attrib'))
self.assertTrue(hasattr(element, 'text'))
self.assertTrue(hasattr(element, 'tail'))
self._check_string(element.tag)
self._check_mapping(element.attrib)
if element.text is not None:
self._check_string(element.text)
if element.tail is not None:
self._check_string(element.tail)
def _check_string(self, string):
len(string)
for char in string:
self.assertEqual(1, len(char))
new_string = string + ""
new_string = string + " "
string[:0]
def _check_mapping(self, mapping):
len(mapping)
keys = mapping.keys()
values = mapping.values()
items = mapping.items()
for key in keys:
item = mapping[key]
mapping["key"] = "value"
self.assertEqual("value", mapping["key"])
| _ETreeTestCaseBase |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/customPlot.py | {
"start": 182,
"end": 852
} | class ____(pg.ViewBox):
def __init__(self, *args, **kwds):
kwds['enableMenu'] = False
pg.ViewBox.__init__(self, *args, **kwds)
self.setMouseMode(self.RectMode)
## reimplement right-click to zoom out
def mouseClickEvent(self, ev):
if ev.button() == QtCore.Qt.MouseButton.RightButton:
self.autoRange()
## reimplement mouseDragEvent to disable continuous axis zoom
def mouseDragEvent(self, ev, axis=None):
if axis is not None and ev.button() == QtCore.Qt.MouseButton.RightButton:
ev.ignore()
else:
pg.ViewBox.mouseDragEvent(self, ev, axis=axis)
| CustomViewBox |
python | fsspec__filesystem_spec | fsspec/tests/conftest.py | {
"start": 1005,
"end": 6854
} | class ____(BaseHTTPRequestHandler):
static_files = {
"/index/realfile": data,
"/index/otherfile": data,
"/index": _make_index_listing,
"/data/20020401": listing,
"/simple/": _make_listing("/simple/file", "/simple/dir/"),
"/simple/file": data,
"/simple/dir/": _make_listing("/simple/dir/file"),
"/simple/dir/file": data,
"/unauthorized": AssertionError("shouldn't access"),
}
dynamic_files = {}
files = ChainMap(dynamic_files, static_files)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _respond(self, code=200, headers=None, data=b""):
headers = headers or {}
headers.update({"User-Agent": "test"})
self.send_response(code)
for k, v in headers.items():
self.send_header(k, str(v))
self.end_headers()
if data:
self.wfile.write(data)
def do_GET(self):
baseurl = f"http://127.0.0.1:{self.server.server_port}"
file_path = self.path
if file_path.endswith("/") and file_path.rstrip("/") in self.files:
file_path = file_path.rstrip("/")
file_data = self.files.get(file_path)
if callable(file_data):
file_data = file_data(baseurl)
if "give_path" in self.headers:
return self._respond(200, data=json.dumps({"path": self.path}).encode())
if "redirect" in self.headers and file_path != "/index/realfile":
new_url = _make_realfile(baseurl)
return self._respond(301, {"Location": new_url})
if file_path == "/unauthorized":
return self._respond(401)
if file_data is None:
return self._respond(404)
status = 200
content_range = f"bytes 0-{len(file_data) - 1}/{len(file_data)}"
if ("Range" in self.headers) and ("ignore_range" not in self.headers):
ran = self.headers["Range"]
b, ran = ran.split("=")
start, end = ran.split("-")
if start:
content_range = f"bytes {start}-{end}/{len(file_data)}"
file_data = file_data[int(start) : (int(end) + 1) if end else None]
else:
# suffix only
l = len(file_data)
content_range = f"bytes {l - int(end)}-{l - 1}/{l}"
file_data = file_data[-int(end) :]
if "use_206" in self.headers:
status = 206
if "give_length" in self.headers:
if "gzip_encoding" in self.headers:
file_data = gzip.compress(file_data)
response_headers = {
"Content-Length": len(file_data),
"Content-Encoding": "gzip",
}
else:
response_headers = {"Content-Length": len(file_data)}
self._respond(status, response_headers, file_data)
elif "give_range" in self.headers:
self._respond(status, {"Content-Range": content_range}, file_data)
elif "give_mimetype" in self.headers:
self._respond(
status, {"Content-Type": "text/html; charset=utf-8"}, file_data
)
else:
self._respond(status, data=file_data)
def do_POST(self):
length = self.headers.get("Content-Length")
file_path = self.path.rstrip("/")
if length is None:
assert self.headers.get("Transfer-Encoding") == "chunked"
self.files[file_path] = b"".join(self.read_chunks())
else:
self.files[file_path] = self.rfile.read(int(length))
self._respond(200)
do_PUT = do_POST
def read_chunks(self):
length = -1
while length != 0:
line = self.rfile.readline().strip()
if len(line) == 0:
length = 0
else:
length = int(line, 16)
yield self.rfile.read(length)
self.rfile.readline()
def do_HEAD(self):
r_headers = {}
if "head_not_auth" in self.headers:
r_headers["Content-Length"] = 123
return self._respond(403, r_headers, b"not authorized for HEAD request")
elif "head_ok" not in self.headers:
return self._respond(405)
file_path = self.path.rstrip("/")
file_data = self.files.get(file_path)
if file_data is None:
return self._respond(404)
if ("give_length" in self.headers) or ("head_give_length" in self.headers):
if "zero_length" in self.headers:
r_headers["Content-Length"] = 0
elif "gzip_encoding" in self.headers:
file_data = gzip.compress(file_data)
r_headers["Content-Encoding"] = "gzip"
r_headers["Content-Length"] = len(file_data)
else:
r_headers["Content-Length"] = len(file_data)
elif "give_range" in self.headers:
r_headers["Content-Range"] = f"0-{len(file_data) - 1}/{len(file_data)}"
elif "give_etag" in self.headers:
r_headers["ETag"] = "xxx"
if self.headers.get("accept_range") == "none":
r_headers["Accept-Ranges"] = "none"
self._respond(200, r_headers)
@contextlib.contextmanager
def serve():
server_address = ("", 0)
httpd = HTTPServer(server_address, HTTPTestHandler)
th = threading.Thread(target=httpd.serve_forever)
th.daemon = True
th.start()
try:
yield f"http://127.0.0.1:{httpd.server_port}"
finally:
httpd.socket.close()
httpd.shutdown()
th.join()
@pytest.fixture(scope="module")
def server():
with serve() as s:
server = SimpleNamespace(address=s, realfile=_make_realfile(s))
yield server
| HTTPTestHandler |
python | ApeWorX__ape | src/ape/utils/basemodel.py | {
"start": 12756,
"end": 18855
} | class ____:
"""
A mixin to use on models that provide ``ExtraModelAttributes``.
**NOTE**: Must come _before_ your base-model class in subclass tuple to function.
"""
def __ape_extra_attributes__(self) -> Iterator[ExtraModelAttributes]:
"""
Override this method to supply extra attributes
to a model in Ape; this allow more properties
to be available when invoking ``__getattr__``.
Returns:
Iterator[:class:`~ape.utils.basemodel.ExtraModelAttributes`]: A
series of instances defining extra model attributes.
"""
return iter(())
@only_raise_attribute_error
def __getattr__(self, name: str) -> Any:
"""
An overridden ``__getattr__`` implementation that takes into
account :meth:`~ape.utils.basemodel.ExtraAttributesMixin.__ape_extra_attributes__`.
"""
_assert_not_ipython_check(name)
private_attrs = (self.__pydantic_private__ or {}) if isinstance(self, RootBaseModel) else {}
if name in private_attrs:
_recursion_checker.reset(name)
return private_attrs[name]
return get_attribute_with_extras(self, name)
def __getitem__(self, name: Any) -> Any:
# For __getitem__, we first try the extra (unlike `__getattr__`).
return get_item_with_extras(self, name)
def get_attribute_with_extras(obj: Any, name: str, coerce_attr_error: bool = True) -> Any:
_assert_not_ipython_check(name)
if _recursion_checker.check(name):
# Prevent segfaults.
# First, attempt to get real error.
message = f"Failed trying to get {name}"
if real_error := _recursion_checker.getattr_errors.get(name):
message = f"{message}. {real_error}"
_recursion_checker.reset(name)
raise AttributeError(message)
_recursion_checker.add(name)
res = None
if not isinstance(obj, ExtraAttributesMixin):
name = getattr(type(obj), "__name__", "obj")
raise AttributeError(f"{name} must use the '{ExtraAttributesMixin.__name__}' mixin'")
try:
res = super(ExtraAttributesMixin, obj).__getattribute__(name)
except AttributeError as base_attr_err:
_recursion_checker.getattr_errors[name] = base_attr_err
if res is not None:
_recursion_checker.reset(name)
return res
if name.startswith("__") and name.endswith("__"):
# Don't seek double-dunderized definitions from extras.
raise AttributeError(name)
# NOTE: Do not check extras within the error handler to avoid
# errors occurring within an exception handler (Python shows that differently).
extras_checked = set()
for ape_extra in obj.__ape_extra_attributes__():
if not ape_extra.include_getattr:
continue
extras_checked.add(ape_extra.name)
try:
if name in ape_extra:
# Attribute was found in one of the supplied
# extra attributes mappings.
result = ape_extra.get(name)
# NOTE: Don't reset until _after_ we have the result.
_recursion_checker.reset(name)
return result
except Exception as err:
_recursion_checker.reset(name)
raise ApeAttributeError(f"{name} - {err}", base_err=err) from err
# The error message mentions the alternative mappings,
# such as a contract-type map.
base_err = None
if name in _recursion_checker.getattr_errors:
# There was an error getting the value. Show that.
base_err = _recursion_checker.getattr_errors[name]
message = str(base_err)
else:
message = f"'{obj!r}' has no attribute '{name}'"
if extras_checked:
extras_str = ", ".join(sorted(extras_checked))
suffix = f"Also checked extra(s) '{extras_str}'"
if suffix not in message:
if message and message[-1] not in (".", "?", "!"):
message = f"{message}."
message = f"{message} {suffix}"
_recursion_checker.reset(name)
if message and message[-1] not in (".", "?", "!"):
message = f"{message}."
if base_err and not coerce_attr_error:
raise base_err
# Coerce whatever error to automatically be an AttributeError
# (required for __getattr__ or must handle independently).
attr_err = ApeAttributeError(message)
if base_err:
raise attr_err from base_err
else:
raise attr_err
def get_item_with_extras(obj: Any, name: str) -> Any:
# For __getitem__, we first try the extra (unlike `__getattr__`).
extras_checked = set()
additional_error_messages = {}
for extra in obj.__ape_extra_attributes__():
if not extra.include_getitem:
continue
if name in extra:
return extra.get(name)
extras_checked.add(extra.name)
if extra.additional_error_message:
additional_error_messages[extra.name] = extra.additional_error_message
# NOTE: If extras were supplied, the user was expecting it to be
# there (unlike __getattr__).
if extras_checked:
prefix = f"Unable to find '{name}' in"
if not additional_error_messages:
extras_str = ", ".join(extras_checked)
message = f"{prefix} any of '{extras_str}'."
else:
# The class is including additional error messages for the IndexError.
message = ""
for extra_checked in extras_checked:
additional_message = additional_error_messages.get(extra_checked)
suffix = f" {additional_message}" if additional_message else ""
sub_message = f"{prefix} '{extra_checked}'.{suffix}"
message = f"{message}\n{sub_message}" if message else sub_message
raise ApeIndexError(message)
# The user did not supply any extra __getitem__ attributes.
# Do what you would have normally done.
return super(ExtraAttributesMixin, obj).__getitem__(name) # type: ignore
| ExtraAttributesMixin |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 134591,
"end": 136585
} | class ____(rv_continuous):
r"""A left-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_l` is:
.. math::
f(x) = \exp(x - e^x)
for real :math:`x`.
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(after_notes)s
%(example)s
"""
def _shape_info(self):
return []
def _pdf(self, x):
# gumbel_l.pdf(x) = exp(x - exp(x))
return np.exp(self._logpdf(x))
def _logpdf(self, x):
return x - np.exp(x)
def _cdf(self, x):
return -sc.expm1(-np.exp(x))
def _ppf(self, q):
return np.log(-sc.log1p(-q))
def _logsf(self, x):
return -np.exp(x)
def _sf(self, x):
return np.exp(-np.exp(x))
def _isf(self, x):
return np.log(-np.log(x))
def _stats(self):
return -_EULER, np.pi*np.pi/6.0, \
-12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return _EULER + 1.
@_call_super_mom
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
# The fit method of `gumbel_r` can be used for this distribution with
# small modifications. The process to do this is
# 1. pass the sign negated data into `gumbel_r.fit`
# - if the location is fixed, it should also be negated.
# 2. negate the sign of the resulting location, leaving the scale
# unmodified.
# `gumbel_r.fit` holds necessary input checks.
if kwds.get('floc') is not None:
kwds['floc'] = -kwds['floc']
loc_r, scale_r, = gumbel_r.fit(-np.asarray(data), *args, **kwds)
return -loc_r, scale_r
gumbel_l = gumbel_l_gen(name='gumbel_l')
| gumbel_l_gen |
python | pytorch__pytorch | torchgen/api/python.py | {
"start": 14039,
"end": 19025
} | class ____:
# Base operator name, without inplace/outplace suffix.
name: str
# Positional arguments.
# TODO: create a dedicated SelfArgument type for 'self'?
input_args: tuple[PythonArgument, ...]
# Keyword arguments excluding the 'out' argument and scattered kwargs belonging
# to TensorOptions (dtype, layout, device, pin_memory, requires_grad, etc).
input_kwargs: tuple[PythonArgument, ...]
output_args: PythonOutArgument | None
# Return types, which are only used by pyi
returns: PythonReturns
# These are scattered kwargs arguments belonging to TensorOptions.
# When binding to C++, they are packed into a TensorOptions object 'options'.
# It's possible that the C++ signature doesn't take TensorOptions object (e.g.
# for out variant), in which case they will be used as scattered fields without
# being packed into 'options'.
# TODO: maybe create a PythonTensorOptionsArgument?
tensor_options_args: tuple[PythonArgument, ...]
# method or function signature?
method: bool
@property
def deprecated(self) -> bool:
return False
def arguments(
self, *, skip_outputs: bool = False, skip_tensor_options: bool = False
) -> tuple[PythonArgument | PythonOutArgument, ...]:
result: list[PythonArgument | PythonOutArgument] = []
result.extend(self.input_args)
result.extend(self.input_kwargs)
if self.output_args is not None and not skip_outputs:
result.append(self.output_args)
if not skip_tensor_options:
result.extend(self.tensor_options_args)
return tuple(result)
def arguments_count(self) -> int:
return len(self.arguments())
def output_idx(self) -> int:
return len(self.input_args) + len(self.input_kwargs)
# [old codegen] Compute the Python function signature for argument parsing,
# as specified in torch/csrc/utils/python_arg_parser.h. WARNING:
# this is NOT the same type signature as specified by PEP 484
# as understood by mypy; our format was independently developed
# and has some quirks to make it more suitable specifically
# for error parsing.
#
# For a translation to mypy-valid type signatures, see
# signature_str_pyi().
def signature_str(self, *, skip_outputs: bool = False, symint: bool = True) -> str:
args = self.arguments(skip_outputs=skip_outputs)
schema_formals: list[str] = [
a.argument_str(method=self.method, symint=symint) for a in args
]
positional_argc = len(self.input_args)
if len(schema_formals) > positional_argc:
schema_formals.insert(positional_argc, "*")
return f"{self.name}({', '.join(schema_formals)})"
def signature_str_pyi(self, *, skip_outputs: bool = False) -> str:
args = self.arguments(skip_outputs=skip_outputs)
schema_formals: list[str] = [
a.argument_str_pyi(method=self.method) for a in args
]
positional_argc = len(self.input_args)
if len(schema_formals) > positional_argc:
schema_formals.insert(positional_argc, "*")
# only pyi signatures include returns
returns_str = returns_str_pyi(self)
# pyi also includes self (with no typing/defaults) for methods
if self.method:
schema_formals.insert(0, "self")
return format_function_signature(self.name, schema_formals, returns_str)
def signature_str_pyi_vararg(self, *, skip_outputs: bool = False) -> str | None:
# only pyi uses vararg signatures
args = self.arguments(skip_outputs=skip_outputs)
schema_formals: list[str] = [
a.argument_str_pyi(method=self.method) for a in args
]
# vararg only applies to pyi signatures. vararg variants are not generated for all signatures
num_args = self.arguments_count()
if num_args == 0:
return None
num_positionalargs = len(self.input_args)
vararg_type = args[0].type
if not (
isinstance(vararg_type, ListType)
and str(vararg_type.elem) in ["int", "SymInt"]
and num_positionalargs == 1
):
return None
# Below are the major changes in vararg vs. regular pyi signatures
# vararg signatures also omit the asterix
assert isinstance(vararg_type, ListType)
schema_formals[0] = (
"*" + args[0].name + ": " + argument_type_str_pyi(vararg_type.elem)
)
returns_str = returns_str_pyi(self)
# pyi also includes self (with no typing/defaults) for methods
if self.method:
schema_formals.insert(0, "self")
return format_function_signature(self.name, schema_formals, returns_str)
# The deprecated python signature involves some special logic, so create a
# dedicated data model to store these extra properties.
@dataclass(frozen=True)
| PythonSignature |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/ndb/properties/snippets.py | {
"start": 3331,
"end": 3406
} | class ____(messages.Enum):
RED = 620
GREEN = 495
BLUE = 450
| Color |
python | walkccc__LeetCode | solutions/1756. Design Most Recently Used Queue/1756.py | {
"start": 42,
"end": 392
} | class ____:
def __init__(self, n: int):
# [(priority value, actual value)]
self.q = SortedList((i, i) for i in range(1, n + 1))
def fetch(self, k: int) -> int:
_, num = self.q.pop(k - 1)
if self.q:
maxPriority = self.q[-1][0]
self.q.add((maxPriority + 1, num))
else:
self.q.add((0, num))
return num
| MRUQueue |
python | conda__conda | conda/exceptions.py | {
"start": 1634,
"end": 2297
} | class ____(CondaError):
def __init__(self, bad_deps: Iterable[Iterable[MatchSpec]]):
# bad_deps is a list of lists
# bad_deps should really be named 'invalid_chains'
self.bad_deps = tuple(dep for deps in bad_deps for dep in deps if dep)
formatted_chains = tuple(
" -> ".join(map(str, bad_chain)) for bad_chain in bad_deps
)
self._formatted_chains = formatted_chains
message = "\n" + "\n".join(
(f" - {bad_chain}") for bad_chain in formatted_chains
)
super().__init__(message)
NoPackagesFound = NoPackagesFoundError = ResolvePackageNotFound
| ResolvePackageNotFound |
python | plotly__plotly.py | plotly/graph_objs/scatterternary/_selected.py | {
"start": 233,
"end": 3413
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterternary"
_path_str = "scatterternary.selected"
_valid_props = {"marker", "textfont"}
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.selected.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.scatterternary.selected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.selected.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Returns
-------
plotly.graph_objs.scatterternary.selected.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
@property
def _prop_descriptions(self):
return """\
marker
:class:`plotly.graph_objects.scatterternary.selected.Ma
rker` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scatterternary.selected.Te
xtfont` instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Selected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatterternary.Selected`
marker
:class:`plotly.graph_objects.scatterternary.selected.Ma
rker` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scatterternary.selected.Te
xtfont` instance or dict with compatible properties
Returns
-------
Selected
"""
super().__init__("selected")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterternary.Selected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.Selected`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("marker", arg, marker)
self._set_property("textfont", arg, textfont)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Selected |
python | pennersr__django-allauth | allauth/account/mixins.py | {
"start": 3597,
"end": 4311
} | class ____:
template_name_signup_closed = (
"account/signup_closed." + app_settings.TEMPLATE_EXTENSION
)
def dispatch(self, request, *args, **kwargs):
try:
if not self.is_open():
return self.closed()
except ImmediateHttpResponse as e:
return e.response
return super().dispatch(request, *args, **kwargs)
def is_open(self):
return get_adapter(self.request).is_open_for_signup(self.request)
def closed(self):
response_kwargs = {
"request": self.request,
"template": self.template_name_signup_closed,
}
return self.response_class(**response_kwargs)
| CloseableSignupMixin |
python | walkccc__LeetCode | solutions/2660. Determine the Winner of a Bowling Game/2660.py | {
"start": 0,
"end": 486
} | class ____:
def isWinner(self, player1: list[int], player2: list[int]) -> int:
def getScore(player: list[int]) -> int:
INVALID = -3
score = 0
last10 = INVALID
for i, p in enumerate(player):
score += p if i - last10 > 2 else p * 2
if p == 10:
last10 = i
return score
score1 = getScore(player1)
score2 = getScore(player2)
if score1 > score2:
return 1
if score2 > score1:
return 2
return 0
| Solution |
python | great-expectations__great_expectations | great_expectations/data_context/migrator/file_migrator.py | {
"start": 819,
"end": 6509
} | class ____:
"""Encapsulates any logic necessary to convert an existing context to a FileDataContext
Only takes in the necessary dependencies for conversion:
- context.stores
- context._datasource_store
- context.variables
- context.fluent_config
"""
def __init__(
self,
primary_stores: dict[str, Store],
datasource_store: DatasourceStore,
variables: DataContextVariables,
fluent_config: GxConfig,
) -> None:
self._primary_stores = primary_stores
self._datasource_store = datasource_store
self._variables = variables
self._fluent_config = fluent_config
def migrate(self) -> FileDataContext:
"""Migrate your in-memory Data Context to a file-backed one.
Takes the following steps:
1. Scaffolds filesystem
2. Migrates primary stores (only creates default named stores)
3. Migrates datasource store
4. Migrates data docs sites (both physical files and config)
5. Migrates fluent datasources
Returns:
A FileDataContext with an updated config to reflect the state of the current context.
"""
target_context = self._scaffold_filesystem()
self._migrate_primary_stores(
target_stores=target_context.stores,
)
self._migrate_datasource_store(target_store=target_context._datasource_store)
self._migrate_data_docs_sites(
target_context=target_context,
)
self._migrate_fluent_datasources(target_context=target_context)
# Re-init context to parse filesystem changes into config
target_context = FileDataContext()
print(f"Successfully migrated to {target_context.__class__.__name__}!")
return target_context
def _scaffold_filesystem(self) -> FileDataContext:
path = pathlib.Path.cwd().absolute()
target_context = gx.get_context(mode="file", project_root_dir=str(path))
logger.info("Scaffolded necessary directories for a file-backed context")
return target_context
def _migrate_primary_stores(self, target_stores: dict[str, Store]) -> None:
source_stores = self._primary_stores
for name, source_store in source_stores.items():
target_store = target_stores.get(name)
if target_store:
self._migrate_store(
store_name=name,
source_store=source_store,
target_store=target_store,
)
else:
logger.warning(
f"Could not migrate the contents of store {name}; only default named stores are migrated" # noqa: E501 # FIXME CoP
)
def _migrate_datasource_store(self, target_store: DatasourceStore) -> None:
source_store = self._datasource_store
self._migrate_store(
store_name=DataContextConfigDefaults.DEFAULT_DATASOURCE_STORE_NAME.value,
source_store=source_store,
target_store=target_store,
)
def _migrate_store(self, store_name: str, source_store: Store, target_store: Store) -> None:
logger.info(f"Migrating key-value pairs from {store_name} ({source_store.__class__}).")
for key in source_store.list_keys():
source_obj = source_store.get(key)
target_store.add(key=key, value=source_obj)
logger.info(f"Successfully migrated stored object saved with key {key}.")
def _migrate_data_docs_sites(self, target_context: FileDataContext) -> None:
target_root = pathlib.Path(target_context.root_directory)
target_variables = target_context.variables
source_configs = self._variables.data_docs_sites or {}
self._migrate_data_docs_site_configs(
target_root=target_root,
source_configs=source_configs,
target_variables=target_variables,
)
target_context.build_data_docs()
def _migrate_fluent_datasources(self, target_context: FileDataContext) -> None:
target_context.fluent_config = self._fluent_config
target_context._save_project_config()
def _migrate_data_docs_site_configs(
self,
source_configs: dict,
target_root: pathlib.Path,
target_variables: DataContextVariables,
):
target_base_directory = target_root.joinpath(
DataContextConfigDefaults.DEFAULT_DATA_DOCS_BASE_DIRECTORY_RELATIVE_NAME.value
)
updated_data_docs_config = {}
for site_name, site_config in source_configs.items():
updated_site_config = self._migrate_data_docs_site_config(
site_name=site_name,
site_config=site_config,
target_base_directory=target_base_directory,
)
updated_data_docs_config[site_name] = updated_site_config
# If no sites to migrate, don't touch config defaults
if updated_data_docs_config:
target_variables.data_docs_sites = updated_data_docs_config
target_variables.save()
def _migrate_data_docs_site_config(
self, site_name: str, site_config: dict, target_base_directory: pathlib.Path
) -> dict:
absolute_site_path = target_base_directory.joinpath(site_name)
project_root = pathlib.Path.cwd().joinpath(SerializableDataContext.GX_DIR)
relative_site_path = absolute_site_path.relative_to(project_root)
updated_config = site_config
updated_config["store_backend"]["base_directory"] = str(relative_site_path)
return updated_config
| FileMigrator |
python | django__django | tests/composite_pk/test_checks.py | {
"start": 235,
"end": 11213
} | class ____(TestCase):
maxDiff = None
def test_composite_pk_must_be_unique_strings(self):
test_cases = (
(),
(0,),
(1,),
("id", False),
("id", "id"),
(("id",),),
)
for i, args in enumerate(test_cases):
with (
self.subTest(args=args),
self.assertRaisesMessage(
ValueError, "CompositePrimaryKey args must be unique strings."
),
):
models.CompositePrimaryKey(*args)
def test_composite_pk_must_include_at_least_2_fields(self):
expected_message = "CompositePrimaryKey must include at least two fields."
with self.assertRaisesMessage(ValueError, expected_message):
models.CompositePrimaryKey("id")
def test_composite_pk_cannot_have_a_default(self):
expected_message = "CompositePrimaryKey cannot have a default."
with self.assertRaisesMessage(ValueError, expected_message):
models.CompositePrimaryKey("tenant_id", "id", default=(1, 1))
def test_composite_pk_cannot_have_a_database_default(self):
expected_message = "CompositePrimaryKey cannot have a database default."
with self.assertRaisesMessage(ValueError, expected_message):
models.CompositePrimaryKey("tenant_id", "id", db_default=models.F("id"))
def test_composite_pk_cannot_have_a_db_column(self):
expected_message = "CompositePrimaryKey cannot have a db_column."
with self.assertRaisesMessage(ValueError, expected_message):
models.CompositePrimaryKey("tenant_id", "id", db_column="tenant_pk")
def test_composite_pk_cannot_be_editable(self):
expected_message = "CompositePrimaryKey cannot be editable."
with self.assertRaisesMessage(ValueError, expected_message):
models.CompositePrimaryKey("tenant_id", "id", editable=True)
def test_composite_pk_must_be_a_primary_key(self):
expected_message = "CompositePrimaryKey must be a primary key."
with self.assertRaisesMessage(ValueError, expected_message):
models.CompositePrimaryKey("tenant_id", "id", primary_key=False)
def test_composite_pk_must_be_blank(self):
expected_message = "CompositePrimaryKey must be blank."
with self.assertRaisesMessage(ValueError, expected_message):
models.CompositePrimaryKey("tenant_id", "id", blank=False)
def test_composite_pk_must_not_have_other_pk_field(self):
class Foo(models.Model):
pk = models.CompositePrimaryKey("foo_id", "id")
foo_id = models.IntegerField()
id = models.IntegerField(primary_key=True)
self.assertEqual(
Foo.check(databases=self.databases),
[
checks.Error(
"The model cannot have more than one field with "
"'primary_key=True'.",
obj=Foo,
id="models.E026",
),
],
)
def test_composite_pk_cannot_include_nullable_field(self):
class Foo(models.Model):
pk = models.CompositePrimaryKey("foo_id", "id")
foo_id = models.IntegerField()
id = models.IntegerField(null=True)
self.assertEqual(
Foo.check(databases=self.databases),
[
checks.Error(
"'id' cannot be included in the composite primary key.",
hint="'id' field may not set 'null=True'.",
obj=Foo,
id="models.E042",
),
],
)
def test_composite_pk_can_include_fk_name(self):
class Foo(models.Model):
pass
class Bar(models.Model):
pk = models.CompositePrimaryKey("foo", "id")
foo = models.ForeignKey(Foo, on_delete=models.CASCADE)
id = models.SmallIntegerField()
self.assertEqual(Foo.check(databases=self.databases), [])
self.assertEqual(Bar.check(databases=self.databases), [])
def test_composite_pk_cannot_include_same_field(self):
class Foo(models.Model):
pass
class Bar(models.Model):
pk = models.CompositePrimaryKey("foo", "foo_id")
foo = models.ForeignKey(Foo, on_delete=models.CASCADE)
id = models.SmallIntegerField()
self.assertEqual(Foo.check(databases=self.databases), [])
self.assertEqual(
Bar.check(databases=self.databases),
[
checks.Error(
"'foo_id' cannot be included in the composite primary key.",
hint="'foo_id' and 'foo' are the same fields.",
obj=Bar,
id="models.E042",
),
],
)
def test_composite_pk_cannot_include_composite_pk_field(self):
class Foo(models.Model):
pk = models.CompositePrimaryKey("id", "pk")
id = models.SmallIntegerField()
self.assertEqual(
Foo.check(databases=self.databases),
[
checks.Error(
"'pk' cannot be included in the composite primary key.",
hint="'pk' field has no column.",
obj=Foo,
id="models.E042",
),
],
)
def test_composite_pk_cannot_include_db_column(self):
class Foo(models.Model):
pk = models.CompositePrimaryKey("foo", "bar")
foo = models.SmallIntegerField(db_column="foo_id")
bar = models.SmallIntegerField(db_column="bar_id")
class Bar(models.Model):
pk = models.CompositePrimaryKey("foo_id", "bar_id")
foo = models.SmallIntegerField(db_column="foo_id")
bar = models.SmallIntegerField(db_column="bar_id")
self.assertEqual(Foo.check(databases=self.databases), [])
self.assertEqual(
Bar.check(databases=self.databases),
[
checks.Error(
"'foo_id' cannot be included in the composite primary key.",
hint="'foo_id' is not a valid field.",
obj=Bar,
id="models.E042",
),
checks.Error(
"'bar_id' cannot be included in the composite primary key.",
hint="'bar_id' is not a valid field.",
obj=Bar,
id="models.E042",
),
],
)
def test_foreign_object_can_refer_composite_pk(self):
class Foo(models.Model):
pass
class Bar(models.Model):
pk = models.CompositePrimaryKey("foo_id", "id")
foo = models.ForeignKey(Foo, on_delete=models.CASCADE)
id = models.IntegerField()
class Baz(models.Model):
pk = models.CompositePrimaryKey("foo_id", "id")
foo = models.ForeignKey(Foo, on_delete=models.CASCADE)
id = models.IntegerField()
bar_id = models.IntegerField()
bar = models.ForeignObject(
Bar,
on_delete=models.CASCADE,
from_fields=("foo_id", "bar_id"),
to_fields=("foo_id", "id"),
)
self.assertEqual(Foo.check(databases=self.databases), [])
self.assertEqual(Bar.check(databases=self.databases), [])
self.assertEqual(Baz.check(databases=self.databases), [])
def test_composite_pk_must_be_named_pk(self):
class Foo(models.Model):
primary_key = models.CompositePrimaryKey("foo_id", "id")
foo_id = models.IntegerField()
id = models.IntegerField()
self.assertEqual(
Foo.check(databases=self.databases),
[
checks.Error(
"'CompositePrimaryKey' must be named 'pk'.",
obj=Foo._meta.get_field("primary_key"),
id="fields.E013",
),
],
)
@skipUnlessAnyDBFeature(
"supports_virtual_generated_columns",
"supports_stored_generated_columns",
)
def test_composite_pk_cannot_include_generated_field(self):
class Foo(models.Model):
pk = models.CompositePrimaryKey("id", "foo")
id = models.IntegerField()
foo = models.GeneratedField(
expression=F("id"),
output_field=models.IntegerField(),
db_persist=connection.features.supports_stored_generated_columns,
)
self.assertEqual(
Foo.check(databases=self.databases),
[
checks.Error(
"'foo' cannot be included in the composite primary key.",
hint="'foo' field is a generated field.",
obj=Foo,
id="models.E042",
),
],
)
def test_composite_pk_cannot_include_non_local_field(self):
class Foo(models.Model):
a = models.SmallIntegerField()
class Bar(Foo):
pk = models.CompositePrimaryKey("a", "b")
b = models.SmallIntegerField()
self.assertEqual(Foo.check(databases=self.databases), [])
self.assertEqual(
Bar.check(databases=self.databases),
[
checks.Error(
"'a' cannot be included in the composite primary key.",
hint="'a' field is not a local field.",
obj=Bar,
id="models.E042",
),
],
)
def test_proxy_model_can_subclass_model_with_composite_pk(self):
class Foo(models.Model):
pk = models.CompositePrimaryKey("a", "b")
a = models.SmallIntegerField()
b = models.SmallIntegerField()
class Bar(Foo):
class Meta:
proxy = True
self.assertEqual(Foo.check(databases=self.databases), [])
self.assertEqual(Bar.check(databases=self.databases), [])
def test_proxy_model_does_not_check_superclass_composite_pk_errors(self):
class Foo(models.Model):
pk = models.CompositePrimaryKey("a", "b")
a = models.SmallIntegerField()
class Bar(Foo):
class Meta:
proxy = True
self.assertEqual(
Foo.check(databases=self.databases),
[
checks.Error(
"'b' cannot be included in the composite primary key.",
hint="'b' is not a valid field.",
obj=Foo,
id="models.E042",
),
],
)
self.assertEqual(Bar.check(databases=self.databases), [])
| CompositePKChecksTests |
python | django__django | tests/test_runner/models.py | {
"start": 31,
"end": 438
} | class ____(models.Model):
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
friends = models.ManyToManyField("self")
system_check_run_count = 0
@classmethod
def check(cls, *args, **kwargs):
cls.system_check_run_count += 1
return super().check(**kwargs)
# A set of models that use a non-abstract inherited 'through' model.
| Person |
python | jazzband__django-simple-history | simple_history/tests/view.py | {
"start": 605,
"end": 1018
} | class ____(View):
def post(self, request, *args, **kwargs):
poll_info_list = [
{"question": "1", "pub_date": date(2020, 1, 1)},
{"question": "2", "pub_date": date(2020, 1, 2)},
]
polls_to_create = [Poll(**poll_info) for poll_info in poll_info_list]
bulk_create_with_history(polls_to_create, Poll)
return HttpResponse(status=200)
| PollBulkCreateView |
python | rapidsai__cudf | python/cudf/cudf/core/column/struct.py | {
"start": 1016,
"end": 8710
} | class ____(ColumnBase):
"""
Column that stores fields of values.
Every column has n children, where n is
the number of fields in the Struct Dtype.
"""
_VALID_PLC_TYPES = {plc.TypeId.STRUCT}
def __init__(
self,
plc_column: plc.Column,
size: int,
dtype: StructDtype,
offset: int,
null_count: int,
exposed: bool,
):
dtype = self._validate_dtype_instance(dtype)
super().__init__(
plc_column=plc_column,
size=size,
dtype=dtype,
offset=offset,
null_count=null_count,
exposed=exposed,
)
def _get_children_from_pylibcudf_column(
self,
plc_column: plc.Column,
dtype: StructDtype, # type: ignore[override]
exposed: bool,
) -> tuple[ColumnBase, ...]:
return tuple(
child._with_type_metadata(field_dtype)
for child, field_dtype in zip(
super()._get_children_from_pylibcudf_column(
plc_column, dtype=dtype, exposed=exposed
),
dtype.fields.values(),
strict=True,
)
)
def _prep_pandas_compat_repr(self) -> StringColumn | Self:
"""
Preprocess Column to be compatible with pandas repr, namely handling nulls.
* null (datetime/timedelta) = str(pd.NaT)
* null (other types)= str(pd.NA)
"""
# TODO: handle if self.has_nulls(): case
return self
@staticmethod
def _validate_dtype_instance(dtype: StructDtype) -> StructDtype:
# IntervalDtype is a subclass of StructDtype, so compare types exactly
if (
not cudf.get_option("mode.pandas_compatible")
and type(dtype) is not StructDtype
) or (
cudf.get_option("mode.pandas_compatible")
and not is_dtype_obj_struct(dtype)
):
raise ValueError(
f"{type(dtype).__name__} must be a StructDtype exactly."
)
return dtype
@property
def base_size(self) -> int:
if self.base_children:
return len(self.base_children[0])
else:
return self.size + self.offset
def to_arrow(self) -> pa.Array:
children = [child.to_arrow() for child in self.children]
dtype: StructDtype = (
pyarrow_dtype_to_cudf_dtype(self.dtype) # type: ignore[assignment]
if isinstance(self.dtype, pd.ArrowDtype)
else self.dtype
)
pa_type = pa.struct(
{
field: child.type
for field, child in zip(dtype.fields, children, strict=True)
}
)
if self.mask is not None:
buffers = [pa.py_buffer(self.mask.memoryview())]
else:
# PyArrow stubs are too strict - from_buffers should accept None for missing buffers
buffers = [None] # type: ignore[list-item]
return pa.StructArray.from_buffers(
pa_type, len(self), buffers, children=children
)
def to_pandas(
self,
*,
nullable: bool = False,
arrow_type: bool = False,
) -> pd.Index:
# We cannot go via Arrow's `to_pandas` because of the following issue:
# https://issues.apache.org/jira/browse/ARROW-12680
if (
arrow_type
or nullable
or (
cudf.get_option("mode.pandas_compatible")
and isinstance(self.dtype, pd.ArrowDtype)
)
):
return super().to_pandas(nullable=nullable, arrow_type=arrow_type)
else:
return pd.Index(self.to_arrow().tolist(), dtype="object")
@cached_property
def memory_usage(self) -> int:
n = super().memory_usage
for child in self.children:
n += child.memory_usage
return n
def element_indexing(self, index: int) -> dict[Any, Any] | None:
result = super().element_indexing(index)
if isinstance(result, pa.Scalar):
py_element = maybe_nested_pa_scalar_to_py(result)
return self.dtype._recursively_replace_fields(py_element) # type: ignore[union-attr]
return result
def _cast_setitem_value(self, value: Any) -> plc.Scalar:
if isinstance(value, dict):
new_value = {
field: _maybe_na_to_none(value.get(field, None))
for field in self.dtype.fields # type: ignore[union-attr]
}
return pa_scalar_to_plc_scalar(
pa.scalar(new_value, type=self.dtype.to_arrow()) # type: ignore[union-attr]
)
elif value is None or value is cudf.NA:
return pa_scalar_to_plc_scalar(
pa.scalar(None, type=self.dtype.to_arrow()) # type: ignore[union-attr]
)
else:
raise ValueError(
f"Can not set {type(value).__name__} into StructColumn"
)
def copy(self, deep: bool = True) -> Self:
# Since struct columns are immutable, both deep and
# shallow copies share the underlying device data and mask.
return super().copy(deep=False)
@property
def __cuda_array_interface__(self) -> Mapping[str, Any]:
raise NotImplementedError(
"Structs are not yet supported via `__cuda_array_interface__`"
)
def _with_type_metadata(
self: StructColumn, dtype: DtypeObj
) -> StructColumn:
from cudf.core.column import IntervalColumn
from cudf.core.dtypes import IntervalDtype
# Check IntervalDtype first because it's a subclass of StructDtype
if isinstance(dtype, IntervalDtype):
new_children = [
child.astype(dtype.subtype).plc_column
for child in self.base_children
]
new_plc_column = plc.Column(
plc.DataType(plc.TypeId.STRUCT),
self.plc_column.size(),
self.plc_column.data(),
self.plc_column.null_mask(),
self.plc_column.null_count(),
self.plc_column.offset(),
new_children,
)
return IntervalColumn(
plc_column=new_plc_column,
size=self.size,
dtype=dtype,
offset=self.offset,
null_count=self.null_count,
exposed=False,
)
elif isinstance(dtype, StructDtype):
new_children = [
self.base_children[i]
._with_type_metadata(dtype.fields[f])
.plc_column
for i, f in enumerate(dtype.fields.keys())
]
new_plc_column = plc.Column(
plc.DataType(plc.TypeId.STRUCT),
self.plc_column.size(),
self.plc_column.data(),
self.plc_column.null_mask(),
self.plc_column.null_count(),
self.plc_column.offset(),
new_children,
)
return StructColumn(
plc_column=new_plc_column,
size=self.size,
dtype=dtype,
offset=self.offset,
null_count=self.null_count,
exposed=False,
)
# For pandas dtypes, store them directly in the column's dtype property
elif isinstance(dtype, pd.ArrowDtype) and isinstance(
dtype.pyarrow_dtype, pa.StructType
):
self._dtype = dtype
return self
| StructColumn |
python | sympy__sympy | sympy/assumptions/predicates/ntheory.py | {
"start": 931,
"end": 1520
} | class ____(Predicate):
"""
Composite number predicate.
Explanation
===========
``ask(Q.composite(x))`` is true iff ``x`` is a positive integer and has
at least one positive divisor other than ``1`` and the number itself.
Examples
========
>>> from sympy import Q, ask
>>> ask(Q.composite(0))
False
>>> ask(Q.composite(1))
False
>>> ask(Q.composite(2))
False
>>> ask(Q.composite(20))
True
"""
name = 'composite'
handler = Dispatcher("CompositeHandler", doc="Handler for key 'composite'.")
| CompositePredicate |
python | astropy__astropy | astropy/coordinates/representation/cartesian.py | {
"start": 257,
"end": 11481
} | class ____(BaseRepresentation):
"""
Representation of points in 3D cartesian coordinates.
Parameters
----------
x, y, z : `~astropy.units.Quantity` or array
The x, y, and z coordinates of the point(s). If ``x``, ``y``, and ``z``
have different shapes, they should be broadcastable. If not quantity,
``unit`` should be set. If only ``x`` is given, it is assumed that it
contains an array with the 3 coordinates stored along ``xyz_axis``.
unit : unit-like
If given, the coordinates will be converted to this unit (or taken to
be in this unit if not given.
xyz_axis : int, optional
The axis along which the coordinates are stored when a single array is
provided rather than distinct ``x``, ``y``, and ``z`` (default: 0).
differentials : dict, `~astropy.coordinates.CartesianDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
`~astropy.coordinates.CartesianDifferential` instance, or a dictionary of
`~astropy.coordinates.CartesianDifferential` s with keys set to a string representation of
the SI unit with which the differential (derivative) is taken. For
example, for a velocity differential on a positional representation, the
key would be ``'s'`` for seconds, indicating that the derivative is a
time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
attr_classes = {"x": u.Quantity, "y": u.Quantity, "z": u.Quantity}
_xyz = None
def __init__(
self, x, y=None, z=None, unit=None, xyz_axis=None, differentials=None, copy=True
):
if y is None and z is None:
if isinstance(x, np.ndarray) and x.dtype.kind not in "OV":
# Short-cut for 3-D array input.
x = u.Quantity(x, unit, copy=copy, subok=True)
# Keep a link to the array with all three coordinates
# so that we can return it quickly if needed in get_xyz.
self._xyz = x
if xyz_axis:
x = np.moveaxis(x, xyz_axis, 0)
self._xyz_axis = xyz_axis
else:
self._xyz_axis = 0
self._x, self._y, self._z = x
self._differentials = self._validate_differentials(differentials)
return
elif (
isinstance(x, CartesianRepresentation)
and unit is None
and xyz_axis is None
):
if differentials is None:
differentials = x._differentials
super().__init__(x, differentials=differentials, copy=copy)
return
else:
x, y, z = x
if xyz_axis is not None:
raise ValueError(
"xyz_axis should only be set if x, y, and z are in a single array"
" passed in through x, i.e., y and z should not be not given."
)
if y is None or z is None:
raise ValueError(
f"x, y, and z are required to instantiate {self.__class__.__name__}"
)
if unit is not None:
x = u.Quantity(x, unit, copy=copy, subok=True)
y = u.Quantity(y, unit, copy=copy, subok=True)
z = u.Quantity(z, unit, copy=copy, subok=True)
copy = False
super().__init__(x, y, z, copy=copy, differentials=differentials)
if not (
self._x.unit.is_equivalent(self._y.unit)
and self._x.unit.is_equivalent(self._z.unit)
):
raise u.UnitsError("x, y, and z should have matching physical types")
def unit_vectors(self):
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
o = np.broadcast_to(0.0 * u.one, self.shape, subok=True)
return {
"x": CartesianRepresentation(l, o, o, copy=False),
"y": CartesianRepresentation(o, l, o, copy=False),
"z": CartesianRepresentation(o, o, l, copy=False),
}
def scale_factors(self):
l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"x": l, "y": l, "z": l}
def get_xyz(self, xyz_axis=0):
"""Return a vector array of the x, y, and z coordinates.
Parameters
----------
xyz_axis : int, optional
The axis in the final array along which the x, y, z components
should be stored (default: 0).
Returns
-------
xyz : `~astropy.units.Quantity`
With dimension 3 along ``xyz_axis``. Note that, if possible,
this will be a view.
"""
if self._xyz is not None:
if self._xyz_axis == xyz_axis:
return self._xyz
else:
return np.moveaxis(self._xyz, self._xyz_axis, xyz_axis)
# Create combined array. TO DO: keep it in _xyz for repeated use?
# But then in-place changes have to cancel it. Likely best to
# also update components.
return np.stack([self._x, self._y, self._z], axis=xyz_axis)
xyz = property(get_xyz)
@classmethod
def from_cartesian(cls, other):
return other
def to_cartesian(self):
return self
def transform(self, matrix):
"""
Transform the cartesian coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : ndarray
A 3x3 transformation matrix, such as a rotation matrix.
Examples
--------
We can start off by creating a cartesian representation object:
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> rep = CartesianRepresentation([1, 2] * u.pc,
... [2, 3] * u.pc,
... [3, 4] * u.pc)
We now create a rotation matrix around the z axis:
>>> from astropy.coordinates import rotation_matrix
>>> rotation = rotation_matrix(30 * u.deg, axis='z')
Finally, we can apply this transformation:
>>> rep_new = rep.transform(rotation)
>>> rep_new.xyz # doctest: +FLOAT_CMP
<Quantity [[ 1.8660254 , 3.23205081],
[ 1.23205081, 1.59807621],
[ 3. , 4. ]] pc>
"""
# erfa rxp: Multiply a p-vector by an r-matrix.
p = erfa_ufunc.rxp(matrix, self.get_xyz(xyz_axis=-1))
# transformed representation
rep = self.__class__(p, xyz_axis=-1, copy=False)
# Handle differentials attached to this representation
new_diffs = {
k: d.transform(matrix, self, rep) for k, d in self.differentials.items()
}
return rep.with_differentials(new_diffs)
def _combine_operation(self, op, other, reverse=False):
self._raise_if_has_differentials(op.__name__)
try:
other_c = other.to_cartesian()
except Exception:
return NotImplemented
first, second = (self, other_c) if not reverse else (other_c, self)
return self.__class__(
*(
op(getattr(first, component), getattr(second, component))
for component in first.components
)
)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Note that any associated differentials will be dropped during this
operation.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
# erfa pm: Modulus of p-vector.
return erfa_ufunc.pm(self.get_xyz(xyz_axis=-1))
def mean(self, *args, **kwargs):
"""Vector mean.
Returns a new CartesianRepresentation instance with the means of the
x, y, and z components.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials("mean")
return self._apply("mean", *args, **kwargs)
def sum(self, *args, **kwargs):
"""Vector sum.
Returns a new CartesianRepresentation instance with the sums of the
x, y, and z components.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials("sum")
return self._apply("sum", *args, **kwargs)
def dot(self, other):
"""Dot product of two representations.
Note that any associated differentials will be dropped during this
operation.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
If not already cartesian, it is converted.
Returns
-------
dot_product : `~astropy.units.Quantity`
The sum of the product of the x, y, and z components of ``self``
and ``other``.
"""
try:
other_c = other.to_cartesian()
except Exception as err:
raise TypeError(
"can only take dot product with another "
f"representation, not a {type(other)} instance."
) from err
# erfa pdp: p-vector inner (=scalar=dot) product.
return erfa_ufunc.pdp(self.get_xyz(xyz_axis=-1), other_c.get_xyz(xyz_axis=-1))
def cross(self, other):
"""Cross product of two representations.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation` subclass instance
If not already cartesian, it is converted.
Returns
-------
cross_product : `~astropy.coordinates.CartesianRepresentation`
With vectors perpendicular to both ``self`` and ``other``.
"""
self._raise_if_has_differentials("cross")
try:
other_c = other.to_cartesian()
except Exception as err:
raise TypeError(
"cannot only take cross product with another "
f"representation, not a {type(other)} instance."
) from err
# erfa pxp: p-vector outer (=vector=cross) product.
sxo = erfa_ufunc.pxp(self.get_xyz(xyz_axis=-1), other_c.get_xyz(xyz_axis=-1))
return self.__class__(sxo, xyz_axis=-1)
| CartesianRepresentation |
python | huggingface__transformers | src/transformers/models/vilt/modeling_vilt.py | {
"start": 2584,
"end": 9442
} | class ____(nn.Module):
"""
Construct the text and patch embeddings.
Text embeddings are equivalent to BERT embeddings.
Patch embeddings are equivalent to ViT embeddings.
"""
def __init__(self, config):
super().__init__()
# text embeddings
self.text_embeddings = TextEmbeddings(config)
# patch embeddings
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.patch_embeddings = ViltPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
# modality type (text/patch) embeddings
self.token_type_embeddings = nn.Embedding(config.modality_type_vocab_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.config = config
def visual_embed(self, pixel_values, pixel_mask, max_image_length=200):
_, _, ph, pw = self.patch_embeddings.projection.weight.shape
x = self.patch_embeddings(pixel_values)
x_mask = pixel_mask[:, None, :, :].float()
x_mask = nn.functional.interpolate(x_mask, size=(x.shape[2], x.shape[3])).long()
x_h = x_mask[:, 0].sum(dim=1)[:, 0]
x_w = x_mask[:, 0].sum(dim=2)[:, 0]
batch_size, num_channels, height, width = x.shape
patch_dim = self.config.image_size // self.config.patch_size
spatial_pos = self.position_embeddings[:, 1:, :].transpose(1, 2).view(1, num_channels, patch_dim, patch_dim)
pos_embed = torch.cat(
[
nn.functional.pad(
nn.functional.interpolate(
spatial_pos,
size=(h, w),
mode="bilinear",
align_corners=True,
),
(0, width - w, 0, height - h),
)
for h, w in zip(x_h, x_w)
],
dim=0,
)
pos_embed = pos_embed.flatten(2).transpose(1, 2)
x = x.flatten(2).transpose(1, 2)
# Set `device` here, otherwise `patch_index` will always be on `CPU` and will fail near the end for torch>=1.13
patch_index = torch.stack(
meshgrid(torch.arange(x_mask.shape[-2]), torch.arange(x_mask.shape[-1]), indexing="ij"), dim=-1
).to(device=x_mask.device)
patch_index = patch_index[None, None, :, :, :]
patch_index = patch_index.expand(x_mask.shape[0], x_mask.shape[1], -1, -1, -1)
patch_index = patch_index.flatten(1, 3)
x_mask = x_mask.flatten(1)
if max_image_length < 0 or max_image_length is None or not isinstance(max_image_length, int):
# suppose aug is 800 x 1333, then, maximum effective res is 800 x 1333 (if one side gets bigger, the other will be constrained and be shrunk)
# (800 // self.patch_size) * (1333 // self.patch_size) is the maximum number of patches that single image can get.
# if self.patch_size = 32, 25 * 41 = 1025
# if res is 384 x 640, 12 * 20 = 240
effective_resolution = x_h * x_w
max_image_length = effective_resolution.max()
else:
effective_resolution = x_h * x_w
max_image_length = min(effective_resolution.max(), max_image_length)
valid_idx = x_mask.nonzero(as_tuple=False)
non_valid_idx = (1 - x_mask).nonzero(as_tuple=False)
unique_rows = valid_idx[:, 0].unique()
valid_row_idx = [valid_idx[valid_idx[:, 0] == u] for u in unique_rows]
non_valid_row_idx = [non_valid_idx[non_valid_idx[:, 0] == u] for u in unique_rows]
valid_nums = [v.size(0) for v in valid_row_idx]
non_valid_nums = [v.size(0) for v in non_valid_row_idx]
pad_nums = [max_image_length - v for v in valid_nums]
select = []
for i, (v, nv, p) in enumerate(zip(valid_nums, non_valid_nums, pad_nums)):
if p <= 0:
valid_choice = torch.multinomial(torch.ones(v).float(), max_image_length)
select.append(valid_row_idx[i][valid_choice])
else:
pad_choice = torch.multinomial(torch.ones(nv).float(), p, replacement=True)
select.append(torch.cat([valid_row_idx[i], non_valid_row_idx[i][pad_choice]], dim=0))
select = torch.cat(select, dim=0)
x = x[select[:, 0], select[:, 1]].view(batch_size, -1, num_channels)
x_mask = x_mask[select[:, 0], select[:, 1]].view(batch_size, -1)
# `patch_index` should be on the same device as `select`, which is ensured at definition time.
patch_index = patch_index[select[:, 0], select[:, 1]].view(batch_size, -1, 2)
pos_embed = pos_embed[select[:, 0], select[:, 1]].view(batch_size, -1, num_channels)
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
pos_embed = torch.cat(
(self.position_embeddings[:, 0, :][:, None, :].expand(batch_size, -1, -1), pos_embed), dim=1
)
x = x + pos_embed
x = self.dropout(x)
x_mask = torch.cat([torch.ones(x_mask.shape[0], 1).to(x_mask), x_mask], dim=1)
return x, x_mask, (patch_index, (height, width))
def forward(
self,
input_ids,
attention_mask,
token_type_ids,
pixel_values,
pixel_mask,
inputs_embeds,
image_embeds,
image_token_type_idx=1,
):
# PART 1: text embeddings
text_embeds = self.text_embeddings(
input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
# PART 2: patch embeddings (with interpolated position encodings)
if image_embeds is None:
image_embeds, image_masks, patch_index = self.visual_embed(
pixel_values, pixel_mask, max_image_length=self.config.max_image_length
)
else:
image_masks = pixel_mask.flatten(1)
# PART 3: add modality type embeddings
# 0 indicates text, 1 indicates image, 2 is optionally used when a second image is provided (NLVR2)
if image_token_type_idx is None:
image_token_type_idx = 1
text_embeds = text_embeds + self.token_type_embeddings(
torch.zeros_like(attention_mask, dtype=torch.long, device=text_embeds.device)
)
image_embeds = image_embeds + self.token_type_embeddings(
torch.full_like(image_masks, image_token_type_idx, dtype=torch.long, device=text_embeds.device)
)
# PART 4: concatenate
embeddings = torch.cat([text_embeds, image_embeds], dim=1)
masks = torch.cat([attention_mask, image_masks], dim=1)
return embeddings, masks
| ViltEmbeddings |
python | pypa__setuptools | setuptools/tests/test_build.py | {
"start": 542,
"end": 798
} | class ____(Command):
"""Dummy command to be used in tests"""
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
raise NotImplementedError("just to check if the command runs")
| Subcommand |
python | tiangolo__fastapi | scripts/notify_translations.py | {
"start": 2469,
"end": 2529
} | class ____(BaseModel):
data: CommentsData
| CommentsResponse |
python | ray-project__ray | python/ray/llm/tests/serve/cpu/config_generator/test_input_converter.py | {
"start": 401,
"end": 1969
} | class ____:
@pytest.mark.parametrize("hf_token", [None, "hf_bac"])
@pytest.mark.parametrize("remote_storage_uri", [None, "s3://test-uri"])
@pytest.mark.parametrize("gpu_type", ALL_GPU_TYPES)
@pytest.mark.parametrize("tensor_parallelism", [1, 2, 8])
@pytest.mark.parametrize(
"lora_config",
[None, TextCompletionLoraModelConfig(max_num_lora_per_replica=24)],
)
@pytest.mark.parametrize(
"reference_model_id", [None, "meta-llama/Meta-Llama-3.1-8B-Instruct"]
)
def test_model(
self,
hf_token: Optional[str],
remote_storage_uri: Optional[str],
gpu_type: GPUType,
tensor_parallelism: int,
lora_config: Optional[TextCompletionLoraModelConfig],
reference_model_id: Optional[str],
):
model = convert_inputs_to_text_completion_model(
model_id=_MODEL_ID,
hf_token=hf_token,
remote_storage_uri=remote_storage_uri,
gpu_type=gpu_type,
lora_config=lora_config,
tensor_parallelism=tensor_parallelism,
reference_model_id=reference_model_id,
)
assert model.id == _MODEL_ID
assert model.hf_token == hf_token
assert model.remote_storage_uri == remote_storage_uri
assert model.gpu_type.value == gpu_type.value
assert model.tensor_parallelism == tensor_parallelism
assert model.reference_model_id == reference_model_id
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| TestTextCompletionModelConverter |
python | python-pillow__Pillow | src/PIL/ImageFilter.py | {
"start": 4765,
"end": 5563
} | class ____(MultibandFilter):
"""Blurs the image with a sequence of extended box filters, which
approximates a Gaussian kernel. For details on accuracy see
<https://www.mia.uni-saarland.de/Publications/gwosdek-ssvm11.pdf>
:param radius: Standard deviation of the Gaussian kernel. Either a sequence of two
numbers for x and y, or a single number for both.
"""
name = "GaussianBlur"
def __init__(self, radius: float | Sequence[float] = 2) -> None:
self.radius = radius
def filter(self, image: _imaging.ImagingCore) -> _imaging.ImagingCore:
xy = self.radius
if isinstance(xy, (int, float)):
xy = (xy, xy)
if xy == (0, 0):
return image.copy()
return image.gaussian_blur(xy)
| GaussianBlur |
python | allegroai__clearml | clearml/backend_api/services/v2_13/models.py | {
"start": 98792,
"end": 100239
} | class ____(Request):
"""
Get the list of frameworks used in the company models
:param projects: The list of projects which models will be analyzed. If not
passed or empty then all the company and public models will be analyzed
:type projects: Sequence[str]
"""
_service = "models"
_action = "get_frameworks"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"projects": {
"description": "The list of projects which models will be analyzed. If not passed or empty then all the company and public models will be analyzed",
"items": {"type": "string"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, projects: Optional[List[str]] = None, **kwargs: Any) -> None:
super(GetFrameworksRequest, self).__init__(**kwargs)
self.projects = projects
@schema_property("projects")
def projects(self) -> Optional[List[str]]:
return self._property_projects
@projects.setter
def projects(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_projects = None
return
self.assert_isinstance(value, "projects", (list, tuple))
self.assert_isinstance(value, "projects", six.string_types, is_array=True)
self._property_projects = value
| GetFrameworksRequest |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vision.py | {
"start": 60451,
"end": 63529
} | class ____(GoogleCloudBaseOperator):
"""
Detect Document Text in the image.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudVisionDetectImageLabelsOperator`
:param image: (Required) The image to analyze. See more:
https://googleapis.github.io/google-cloud-python/latest/vision/gapic/v1/types.html#google.cloud.vision_v1.types.Image
:param max_results: Number of results to return.
:param retry: (Optional) A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:param timeout: Number of seconds before timing out.
:param additional_properties: Additional properties to be set on the AnnotateImageRequest. See more:
https://googleapis.github.io/google-cloud-python/latest/vision/gapic/v1/types.html#google.cloud.vision_v1.types.AnnotateImageRequest
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
# [START vision_detect_labels_template_fields]
template_fields: Sequence[str] = (
"image",
"max_results",
"timeout",
"gcp_conn_id",
"impersonation_chain",
)
# [END vision_detect_labels_template_fields]
def __init__(
self,
image: dict | Image,
max_results: int | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
additional_properties: dict | None = None,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.image = image
self.max_results = max_results
self.retry = retry
self.timeout = timeout
self.gcp_conn_id = gcp_conn_id
self.additional_properties = additional_properties
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudVisionHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
return hook.label_detection(
image=self.image,
max_results=self.max_results,
retry=self.retry,
timeout=self.timeout,
additional_properties=self.additional_properties,
)
| CloudVisionDetectImageLabelsOperator |
python | tensorflow__tensorflow | tensorflow/python/distribute/collective_all_reduce_strategy_test.py | {
"start": 27234,
"end": 29272
} | class ____(test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(mode=['eager'], required_gpus=1))
def testKeepLogicalDevice(self):
gpus = tf_config.list_physical_devices('GPU')
if len(gpus) > 1:
self.skipTest('Skip logical device test on multi GPUs, since partial GPU '
'virtualization is not permitted.')
# Cannot change logical device after the context initialization.
context._reset_context() # pylint: disable=protected-access
cluster_spec = multi_worker_test_base.create_cluster_spec(
has_chief=False, num_workers=1)
resolver = cluster_resolver_lib.SimpleClusterResolver(
cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),
task_type='worker',
task_id=0)
logical_gpus = len(gpus) * 2
for i, device in enumerate(gpus):
n = (i + 1) * logical_gpus // len(gpus) - i * logical_gpus // len(gpus)
assert n > 0 # guaranteed if count >= len(devices)
configs = []
for ordinal in range(n):
config = context.LogicalDeviceConfiguration(
memory_limit=64,
experimental_device_ordinal=ordinal)
configs.append(config)
tf_config.set_logical_device_configuration(device, configs)
collective_all_reduce_strategy.CollectiveAllReduceStrategy(
cluster_resolver=resolver)
# Since we create two logical GPUs out of the last GPU, there should be one
# more logical GPUs than physical GPUs.
self.assertLen(tf_config.list_logical_devices('GPU'), logical_gpus)
context._reset_context() # pylint: disable=protected-access
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call,
],
mode=['eager']))
| LogicalDeviceTest |
python | scrapy__scrapy | tests/CrawlerRunner/custom_loop_same.py | {
"start": 206,
"end": 699
} | class ____(Spider):
name = "no_request"
custom_settings = {
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
"ASYNCIO_EVENT_LOOP": "uvloop.Loop",
}
async def start(self):
return
yield
def main(reactor):
configure_logging()
runner = CrawlerRunner()
return runner.crawl(NoRequestsSpider)
install_reactor("twisted.internet.asyncioreactor.AsyncioSelectorReactor", "uvloop.Loop")
react(main)
| NoRequestsSpider |
python | getsentry__sentry | tests/sentry/search/events/builder/test_metrics.py | {
"start": 115899,
"end": 119523
} | class ____(MetricBuilderBaseTest):
def test_histogram_columns_set_on_builder(self) -> None:
builder = HistogramMetricQueryBuilder(
params=self.params,
dataset=Dataset.PerformanceMetrics,
query="",
selected_columns=[
"histogram(transaction.duration)",
"histogram(measurements.lcp)",
"histogram(measurements.fcp) as test",
],
histogram_params=HistogramParams(
5,
100,
0,
1, # not used by Metrics
),
)
self.assertCountEqual(
builder.histogram_aliases,
[
"histogram_transaction_duration",
"histogram_measurements_lcp",
"test",
],
)
def test_get_query(self) -> None:
self.store_transaction_metric(
100,
tags={"transaction": "foo_transaction"},
timestamp=self.start + datetime.timedelta(minutes=5),
aggregation_option=AggregationOption.HIST,
)
self.store_transaction_metric(
100,
tags={"transaction": "foo_transaction"},
timestamp=self.start + datetime.timedelta(minutes=5),
aggregation_option=AggregationOption.HIST,
)
self.store_transaction_metric(
450,
tags={"transaction": "foo_transaction"},
timestamp=self.start + datetime.timedelta(minutes=5),
aggregation_option=AggregationOption.HIST,
)
query = HistogramMetricQueryBuilder(
params=self.params,
dataset=Dataset.PerformanceMetrics,
query="",
selected_columns=["histogram(transaction.duration)"],
histogram_params=HistogramParams(
5,
100,
0,
1, # not used by Metrics
),
)
snql_query = query.run_query("test_query")
assert len(snql_query["data"]) == 1
# This data is intepolated via rebucket_histogram
assert snql_query["data"][0]["histogram_transaction_duration"] == [
(0.0, 100.0, 0),
(100.0, 200.0, 2),
(200.0, 300.0, 1),
(300.0, 400.0, 1),
(400.0, 500.0, 1),
]
def test_query_normal_distribution(self) -> None:
for i in range(5):
for _ in range((5 - abs(i - 2)) ** 2):
self.store_transaction_metric(
100 * i + 50,
tags={"transaction": "foo_transaction"},
timestamp=self.start + datetime.timedelta(minutes=5),
aggregation_option=AggregationOption.HIST,
)
query = HistogramMetricQueryBuilder(
params=self.params,
query="",
dataset=Dataset.PerformanceMetrics,
selected_columns=["histogram(transaction.duration)"],
histogram_params=HistogramParams(
5,
100,
0,
1, # not used by Metrics
),
)
snql_query = query.run_query("test_query")
assert len(snql_query["data"]) == 1
# This data is intepolated via rebucket_histogram
assert snql_query["data"][0]["histogram_transaction_duration"] == [
(0.0, 100.0, 10),
(100.0, 200.0, 17),
(200.0, 300.0, 23),
(300.0, 400.0, 17),
(400.0, 500.0, 10),
]
| HistogramMetricQueryBuilderTest |
python | scikit-image__scikit-image | src/skimage/measure/fit.py | {
"start": 5842,
"end": 14726
} | class ____(_BaseModel):
"""Total least squares estimator for N-dimensional lines.
In contrast to ordinary least squares line estimation, this estimator
minimizes the orthogonal distances of points to the estimated line.
Lines are defined by a point (origin) and a unit vector (direction)
according to the following vector equation::
X = origin + lambda * direction
Parameters
----------
origin : array-like, shape (N,)
Coordinates of line origin in N dimensions.
direction : array-like, shape (N,)
Vector giving line direction.
Raises
------
ValueError
If length of `origin` and `direction` differ.
Examples
--------
>>> x = np.linspace(1, 2, 25)
>>> y = 1.5 * x + 3
>>> lm = LineModelND.from_estimate(np.stack([x, y], axis=-1))
>>> lm.origin
array([1.5 , 5.25])
>>> lm.direction # doctest: +FLOAT_CMP
array([0.5547 , 0.83205])
>>> res = lm.residuals(np.stack([x, y], axis=-1))
>>> np.abs(np.round(res, 9))
array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0.])
>>> np.round(lm.predict_y(x[:5]), 3)
array([4.5 , 4.562, 4.625, 4.688, 4.75 ])
>>> np.round(lm.predict_x(y[:5]), 3)
array([1. , 1.042, 1.083, 1.125, 1.167])
"""
def _args_init(self, origin, direction):
"""Initialize ``LineModelND`` instance.
Parameters
----------
origin : array-like, shape (N,)
Coordinates of line origin in N dimensions.
direction : array-like, shape (N,)
Vector giving line direction.
"""
self.origin, self.direction = self._check_init_values(origin, direction)
def _check_init_values(self, origin, direction):
origin, direction = (np.array(v) for v in (origin, direction))
if len(origin) != len(direction):
raise ValueError('Direction vector should be same length as origin point.')
return origin, direction
def _params2init_values(self, params):
if len(params) != 2:
raise ValueError('Input `params` should be length 2')
return self._check_init_values(*params)
@property
@deprecate_func(
deprecated_version=_PARAMS_DEP_START,
removed_version=_PARAMS_DEP_STOP,
hint='`params` attribute deprecated; use ``origin, direction`` attributes instead',
)
def params(self):
"""Return model attributes as ``origin, direction`` tuple."""
return self.origin, self.direction
@classmethod
def from_estimate(cls, data):
"""Estimate line model from data.
This minimizes the sum of shortest (orthogonal) distances
from the given data points to the estimated line.
Parameters
----------
data : (N, dim) array
N points in a space of dimensionality dim >= 2.
Returns
-------
model : Self or `~.FailedEstimation`
An instance of the line model if the estimation succeeded.
Otherwise, we return a special ``FailedEstimation`` object to
signal a failed estimation. Testing the truth value of the failed
estimation object will return ``False``. E.g.
.. code-block:: python
model = LineModelND.from_estimate(...)
if not model:
raise RuntimeError(f"Failed estimation: {model}")
"""
return super().from_estimate(data)
def _estimate(self, data, warn_only=True):
_check_data_atleast_2D(data)
origin = data.mean(axis=0)
data = data - origin
if data.shape[0] == 2: # well determined
direction = data[1] - data[0]
norm = np.linalg.norm(direction)
if norm != 0: # this should not happen to be norm 0
direction /= norm
elif data.shape[0] > 2: # over-determined
# Note: with full_matrices=1 Python dies with joblib parallel_for.
_, _, v = np.linalg.svd(data, full_matrices=False)
direction = v[0]
else: # under-determined
return 'estimate under-determined'
self.origin = origin
self.direction = direction
return None
@_deprecate_model_params
def residuals(self, data, params=DEPRECATED):
"""Determine residuals of data to model.
For each point, the shortest (orthogonal) distance to the line is
returned. It is obtained by projecting the data onto the line.
Parameters
----------
data : (N, dim) array
N points in a space of dimension dim.
Returns
-------
residuals : (N,) array
Residual for each data point.
Other parameters
----------------
params : `~.DEPRECATED`, optional
Optional custom parameter set in the form (`origin`, `direction`).
.. deprecated:: {{ start_version }}
"""
_check_data_atleast_2D(data)
origin, direction = self._get_init_values(params)
if len(origin) != data.shape[1]:
raise ValueError(
f'`origin` is {len(origin)}D, but `data` is {data.shape[1]}D'
)
res = (data - origin) - ((data - origin) @ direction)[
..., np.newaxis
] * direction
return np.linalg.norm(res, axis=1)
@_deprecate_model_params
def predict(self, x, axis=0, params=DEPRECATED):
"""Predict intersection of line model with orthogonal hyperplane.
Parameters
----------
x : (n, 1) array
Coordinates along an axis.
axis : int
Axis orthogonal to the hyperplane intersecting the line.
Returns
-------
data : (n, m) array
Predicted coordinates.
Other parameters
----------------
params : `~.DEPRECATED`, optional
Optional custom parameter set in the form (`origin`, `direction`).
.. deprecated:: {{ start_version }}
Raises
------
ValueError
If the line is parallel to the given axis.
"""
origin, direction = self._get_init_values(params)
if direction[axis] == 0:
# line parallel to axis
raise ValueError(f'Line parallel to axis {axis}')
l = (x - origin[axis]) / direction[axis]
data = origin + l[..., np.newaxis] * direction
return data
@_deprecate_model_params
def predict_x(self, y, params=DEPRECATED):
"""Predict x-coordinates for 2D lines using the estimated model.
Alias for::
predict(y, axis=1)[:, 0]
Parameters
----------
y : array
y-coordinates.
Returns
-------
x : array
Predicted x-coordinates.
Other parameters
----------------
params : `~.DEPRECATED`, optional
Optional custom parameter set in the form (`origin`, `direction`).
.. deprecated:: {{ start_version }}
"""
# Avoid triggering deprecationwarning in predict.
tf = (
self
if (params is None or params is DEPRECATED)
else type(self)(*self._params2init_values(params))
)
x = tf.predict(y, axis=1)[:, 0]
return x
@_deprecate_model_params
def predict_y(self, x, params=DEPRECATED):
"""Predict y-coordinates for 2D lines using the estimated model.
Alias for::
predict(x, axis=0)[:, 1]
Parameters
----------
x : array
x-coordinates.
Returns
-------
y : array
Predicted y-coordinates.
Other parameters
----------------
params : `~.DEPRECATED`, optional
Optional custom parameter set in the form (`origin`, `direction`).
.. deprecated:: {{ start_version }}
"""
# Avoid triggering deprecationwarning in predict.
tf = (
self
if (params is None or params is DEPRECATED)
else type(self)(*self._params2init_values(params))
)
y = tf.predict(x, axis=0)[:, 1]
return y
@_deprecate_estimate
def estimate(self, data):
"""Estimate line model from data.
This minimizes the sum of shortest (orthogonal) distances
from the given data points to the estimated line.
Parameters
----------
data : (N, dim) array
N points in a space of dimensionality ``dim >= 2``.
Returns
-------
success : bool
True, if model estimation succeeds.
"""
return self._estimate(data) is None
@_deprecate_no_args
| LineModelND |
python | scipy__scipy | scipy/optimize/tests/test__remove_redundancy.py | {
"start": 6344,
"end": 6446
} | class ____(RRCommonTests):
def rr(self, A, b):
return _remove_redundancy_svd(A, b)
| TestRRSVD |
python | doocs__leetcode | solution/0700-0799/0716.Max Stack/Solution.py | {
"start": 156,
"end": 801
} | class ____:
def __init__(self):
self.head = Node()
self.tail = Node()
self.head.next = self.tail
self.tail.prev = self.head
def append(self, val) -> Node:
node = Node(val)
node.next = self.tail
node.prev = self.tail.prev
self.tail.prev = node
node.prev.next = node
return node
@staticmethod
def remove(node) -> Node:
node.prev.next = node.next
node.next.prev = node.prev
return node
def pop(self) -> Node:
return self.remove(self.tail.prev)
def peek(self):
return self.tail.prev.val
| DoubleLinkedList |
python | numpy__numpy | tools/swig/test/testSuperTensor.py | {
"start": 307,
"end": 12039
} | class ____(unittest.TestCase):
def __init__(self, methodName="runTests"):
unittest.TestCase.__init__(self, methodName)
self.typeStr = "double"
self.typeCode = "d"
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNorm(self):
"Test norm function"
print(self.typeStr, "... ", file=sys.stderr)
norm = SuperTensor.__dict__[self.typeStr + "Norm"]
supertensor = np.arange(2 * 2 * 2 * 2,
dtype=self.typeCode).reshape((2, 2, 2, 2))
# Note: cludge to get an answer of the same type as supertensor.
# Answer is simply sqrt(sum(supertensor*supertensor)/16)
answer = np.array([np.sqrt(np.sum(supertensor.astype('d') * supertensor) / 16.)], dtype=self.typeCode)[0] # noqa: E501
self.assertAlmostEqual(norm(supertensor), answer, 6)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormBadList(self):
"Test norm function with bad list"
print(self.typeStr, "... ", file=sys.stderr)
norm = SuperTensor.__dict__[self.typeStr + "Norm"]
supertensor = [[[[0, "one"], [2, 3]], [[3, "two"], [1, 0]]],
[[[0, "one"], [2, 3]], [[3, "two"], [1, 0]]]]
self.assertRaises(BadListError, norm, supertensor)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormWrongDim(self):
"Test norm function with wrong dimensions"
print(self.typeStr, "... ", file=sys.stderr)
norm = SuperTensor.__dict__[self.typeStr + "Norm"]
supertensor = np.arange(2 * 2 * 2, dtype=self.typeCode).reshape((2, 2, 2))
self.assertRaises(TypeError, norm, supertensor)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormWrongSize(self):
"Test norm function with wrong size"
print(self.typeStr, "... ", file=sys.stderr)
norm = SuperTensor.__dict__[self.typeStr + "Norm"]
supertensor = np.arange(3 * 2 * 2, dtype=self.typeCode).reshape((3, 2, 2))
self.assertRaises(TypeError, norm, supertensor)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormNonContainer(self):
"Test norm function with non-container"
print(self.typeStr, "... ", file=sys.stderr)
norm = SuperTensor.__dict__[self.typeStr + "Norm"]
self.assertRaises(TypeError, norm, None)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMax(self):
"Test max function"
print(self.typeStr, "... ", file=sys.stderr)
max = SuperTensor.__dict__[self.typeStr + "Max"]
supertensor = [[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]
self.assertEqual(max(supertensor), 8)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMaxBadList(self):
"Test max function with bad list"
print(self.typeStr, "... ", file=sys.stderr)
max = SuperTensor.__dict__[self.typeStr + "Max"]
supertensor = [[[[1, "two"], [3, 4]], [[5, "six"], [7, 8]]],
[[[1, "two"], [3, 4]], [[5, "six"], [7, 8]]]]
self.assertRaises(BadListError, max, supertensor)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMaxNonContainer(self):
"Test max function with non-container"
print(self.typeStr, "... ", file=sys.stderr)
max = SuperTensor.__dict__[self.typeStr + "Max"]
self.assertRaises(TypeError, max, None)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMaxWrongDim(self):
"Test max function with wrong dimensions"
print(self.typeStr, "... ", file=sys.stderr)
max = SuperTensor.__dict__[self.typeStr + "Max"]
self.assertRaises(TypeError, max, [0, -1, 2, -3])
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMin(self):
"Test min function"
print(self.typeStr, "... ", file=sys.stderr)
min = SuperTensor.__dict__[self.typeStr + "Min"]
supertensor = [[[[9, 8], [7, 6]], [[5, 4], [3, 2]]],
[[[9, 8], [7, 6]], [[5, 4], [3, 2]]]]
self.assertEqual(min(supertensor), 2)
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMinBadList(self):
"Test min function with bad list"
print(self.typeStr, "... ", file=sys.stderr)
min = SuperTensor.__dict__[self.typeStr + "Min"]
supertensor = [[[["nine", 8], [7, 6]], [["five", 4], [3, 2]]],
[[["nine", 8], [7, 6]], [["five", 4], [3, 2]]]]
self.assertRaises(BadListError, min, supertensor)
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMinNonContainer(self):
"Test min function with non-container"
print(self.typeStr, "... ", file=sys.stderr)
min = SuperTensor.__dict__[self.typeStr + "Min"]
self.assertRaises(TypeError, min, True)
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMinWrongDim(self):
"Test min function with wrong dimensions"
print(self.typeStr, "... ", file=sys.stderr)
min = SuperTensor.__dict__[self.typeStr + "Min"]
self.assertRaises(TypeError, min, [[1, 3], [5, 7]])
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScale(self):
"Test scale function"
print(self.typeStr, "... ", file=sys.stderr)
scale = SuperTensor.__dict__[self.typeStr + "Scale"]
supertensor = np.arange(3 * 3 * 3 * 3,
dtype=self.typeCode).reshape((3, 3, 3, 3))
answer = supertensor.copy() * 4
scale(supertensor, 4)
self.assertEqual((supertensor == answer).all(), True)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleWrongType(self):
"Test scale function with wrong type"
print(self.typeStr, "... ", file=sys.stderr)
scale = SuperTensor.__dict__[self.typeStr + "Scale"]
supertensor = np.array([[[1, 0, 1], [0, 1, 0], [1, 0, 1]],
[[0, 1, 0], [1, 0, 1], [0, 1, 0]],
[[1, 0, 1], [0, 1, 0], [1, 0, 1]]], 'c')
self.assertRaises(TypeError, scale, supertensor)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleWrongDim(self):
"Test scale function with wrong dimensions"
print(self.typeStr, "... ", file=sys.stderr)
scale = SuperTensor.__dict__[self.typeStr + "Scale"]
supertensor = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1],
[0, 1, 0], [1, 0, 1], [0, 1, 0]], self.typeCode)
self.assertRaises(TypeError, scale, supertensor)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleWrongSize(self):
"Test scale function with wrong size"
print(self.typeStr, "... ", file=sys.stderr)
scale = SuperTensor.__dict__[self.typeStr + "Scale"]
supertensor = np.array([[[1, 0], [0, 1], [1, 0]],
[[0, 1], [1, 0], [0, 1]],
[[1, 0], [0, 1], [1, 0]]], self.typeCode)
self.assertRaises(TypeError, scale, supertensor)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleNonArray(self):
"Test scale function with non-array"
print(self.typeStr, "... ", file=sys.stderr)
scale = SuperTensor.__dict__[self.typeStr + "Scale"]
self.assertRaises(TypeError, scale, True)
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloor(self):
"Test floor function"
print(self.typeStr, "... ", file=sys.stderr)
supertensor = np.arange(2 * 2 * 2 * 2,
dtype=self.typeCode).reshape((2, 2, 2, 2))
answer = supertensor.copy()
answer[answer < 4] = 4
floor = SuperTensor.__dict__[self.typeStr + "Floor"]
floor(supertensor, 4)
np.testing.assert_array_equal(supertensor, answer)
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloorWrongType(self):
"Test floor function with wrong type"
print(self.typeStr, "... ", file=sys.stderr)
floor = SuperTensor.__dict__[self.typeStr + "Floor"]
supertensor = np.ones(2 * 2 * 2 * 2, dtype='c').reshape((2, 2, 2, 2))
self.assertRaises(TypeError, floor, supertensor)
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloorWrongDim(self):
"Test floor function with wrong type"
print(self.typeStr, "... ", file=sys.stderr)
floor = SuperTensor.__dict__[self.typeStr + "Floor"]
supertensor = np.arange(2 * 2 * 2, dtype=self.typeCode).reshape((2, 2, 2))
self.assertRaises(TypeError, floor, supertensor)
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloorNonArray(self):
"Test floor function with non-array"
print(self.typeStr, "... ", file=sys.stderr)
floor = SuperTensor.__dict__[self.typeStr + "Floor"]
self.assertRaises(TypeError, floor, object)
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
def testCeil(self):
"Test ceil function"
print(self.typeStr, "... ", file=sys.stderr)
supertensor = np.arange(2 * 2 * 2 * 2,
dtype=self.typeCode).reshape((2, 2, 2, 2))
answer = supertensor.copy()
answer[answer > 5] = 5
ceil = SuperTensor.__dict__[self.typeStr + "Ceil"]
ceil(supertensor, 5)
np.testing.assert_array_equal(supertensor, answer)
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
def testCeilWrongType(self):
"Test ceil function with wrong type"
print(self.typeStr, "... ", file=sys.stderr)
ceil = SuperTensor.__dict__[self.typeStr + "Ceil"]
supertensor = np.ones(2 * 2 * 2 * 2, 'c').reshape((2, 2, 2, 2))
self.assertRaises(TypeError, ceil, supertensor)
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
def testCeilWrongDim(self):
"Test ceil function with wrong dimensions"
print(self.typeStr, "... ", file=sys.stderr)
ceil = SuperTensor.__dict__[self.typeStr + "Ceil"]
supertensor = np.arange(2 * 2 * 2, dtype=self.typeCode).reshape((2, 2, 2))
self.assertRaises(TypeError, ceil, supertensor)
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
def testCeilNonArray(self):
"Test ceil function with non-array"
print(self.typeStr, "... ", file=sys.stderr)
ceil = SuperTensor.__dict__[self.typeStr + "Ceil"]
supertensor = np.arange(2 * 2 * 2 * 2,
dtype=self.typeCode).reshape((2, 2, 2, 2)).tolist()
self.assertRaises(TypeError, ceil, supertensor)
# Test (type ARGOUT_ARRAY3[ANY][ANY][ANY]) typemap
def testLUSplit(self):
"Test luSplit function"
print(self.typeStr, "... ", file=sys.stderr)
luSplit = SuperTensor.__dict__[self.typeStr + "LUSplit"]
supertensor = np.ones(2 * 2 * 2 * 2, dtype=self.typeCode).reshape((2, 2, 2, 2))
answer_upper = [[[[0, 0], [0, 1]], [[0, 1], [1, 1]]], [[[0, 1], [1, 1]], [[1, 1], [1, 1]]]] # noqa: E501
answer_lower = [[[[1, 1], [1, 0]], [[1, 0], [0, 0]]], [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]] # noqa: E501
lower, upper = luSplit(supertensor)
self.assertEqual((lower == answer_lower).all(), True)
self.assertEqual((upper == answer_upper).all(), True)
######################################################################
| SuperTensorTestCase |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/links/test_step_function.py | {
"start": 1229,
"end": 2487
} | class ____(BaseAwsLinksTestCase):
link_class = StateMachineDetailsLink
@pytest.mark.parametrize(
("state_machine_arn", "expected_url"),
[
pytest.param("", "", id="empty-arn"),
pytest.param(None, "", id="arn-not-set"),
pytest.param(
"foo:bar",
"https://console.aws.amazon.com/states/home?region=eu-west-1#/statemachines/view/foo%3Abar",
id="arn-set",
),
],
)
def test_extra_link(self, state_machine_arn, expected_url: str, mock_supervisor_comms):
if AIRFLOW_V_3_0_PLUS and mock_supervisor_comms:
mock_supervisor_comms.send.return_value = XComResult(
key=self.link_class.key,
value={
"region_name": "eu-west-1",
"aws_domain": self.link_class.get_aws_domain("aws"),
"aws_partition": "aws",
"state_machine_arn": state_machine_arn,
},
)
self.assert_extra_link_url(
expected_url=expected_url,
region_name="eu-west-1",
aws_partition="aws",
state_machine_arn=state_machine_arn,
)
| TestStateMachineDetailsLink |
python | matplotlib__matplotlib | lib/matplotlib/transforms.py | {
"start": 95794,
"end": 101346
} | class ____(TransformedPath):
"""
A `TransformedPatchPath` caches a non-affine transformed copy of the
`~.patches.Patch`. This cached copy is automatically updated when the
non-affine part of the transform or the patch changes.
"""
def __init__(self, patch):
"""
Parameters
----------
patch : `~.patches.Patch`
"""
# Defer to TransformedPath.__init__.
super().__init__(patch.get_path(), patch.get_transform())
self._patch = patch
def _revalidate(self):
patch_path = self._patch.get_path()
# Force invalidation if the patch path changed; otherwise, let base
# class check invalidation.
if patch_path != self._path:
self._path = patch_path
self._transformed_path = None
super()._revalidate()
def nonsingular(vmin, vmax, expander=0.001, tiny=1e-15, increasing=True):
"""
Modify the endpoints of a range as needed to avoid singularities.
Parameters
----------
vmin, vmax : float
The initial endpoints.
expander : float, default: 0.001
Fractional amount by which *vmin* and *vmax* are expanded if
the original interval is too small, based on *tiny*.
tiny : float, default: 1e-15
Threshold for the ratio of the interval to the maximum absolute
value of its endpoints. If the interval is smaller than
this, it will be expanded. This value should be around
1e-15 or larger; otherwise the interval will be approaching
the double precision resolution limit.
increasing : bool, default: True
If True, swap *vmin*, *vmax* if *vmin* > *vmax*.
Returns
-------
vmin, vmax : float
Endpoints, expanded and/or swapped if necessary.
If either input is inf or NaN, or if both inputs are 0 or very
close to zero, it returns -*expander*, *expander*.
"""
if (not np.isfinite(vmin)) or (not np.isfinite(vmax)):
return -expander, expander
swapped = False
if vmax < vmin:
vmin, vmax = vmax, vmin
swapped = True
# Expand vmin, vmax to float: if they were integer types, they can wrap
# around in abs (abs(np.int8(-128)) == -128) and vmax - vmin can overflow.
vmin, vmax = map(float, [vmin, vmax])
maxabsvalue = max(abs(vmin), abs(vmax))
if maxabsvalue < (1e6 / tiny) * np.finfo(float).tiny:
vmin = -expander
vmax = expander
elif vmax - vmin <= maxabsvalue * tiny:
if vmax == 0 and vmin == 0:
vmin = -expander
vmax = expander
else:
vmin -= expander*abs(vmin)
vmax += expander*abs(vmax)
if swapped and not increasing:
vmin, vmax = vmax, vmin
return vmin, vmax
def interval_contains(interval, val):
"""
Check, inclusively, whether an interval includes a given value.
Parameters
----------
interval : (float, float)
The endpoints of the interval.
val : float
Value to check is within interval.
Returns
-------
bool
Whether *val* is within the *interval*.
"""
a, b = interval
if a > b:
a, b = b, a
return a <= val <= b
def _interval_contains_close(interval, val, rtol=1e-10):
"""
Check, inclusively, whether an interval includes a given value, with the
interval expanded by a small tolerance to admit floating point errors.
Parameters
----------
interval : (float, float)
The endpoints of the interval.
val : float
Value to check is within interval.
rtol : float, default: 1e-10
Relative tolerance slippage allowed outside of the interval.
For an interval ``[a, b]``, values
``a - rtol * (b - a) <= val <= b + rtol * (b - a)`` are considered
inside the interval.
Returns
-------
bool
Whether *val* is within the *interval* (with tolerance).
"""
a, b = interval
if a > b:
a, b = b, a
rtol = (b - a) * rtol
return a - rtol <= val <= b + rtol
def interval_contains_open(interval, val):
"""
Check, excluding endpoints, whether an interval includes a given value.
Parameters
----------
interval : (float, float)
The endpoints of the interval.
val : float
Value to check is within interval.
Returns
-------
bool
Whether *val* is within the *interval*.
"""
a, b = interval
return a < val < b or a > val > b
def offset_copy(trans, fig=None, x=0.0, y=0.0, units='inches'):
"""
Return a new transform with an added offset.
Parameters
----------
trans : `Transform` subclass
Any transform, to which offset will be applied.
fig : `~matplotlib.figure.Figure`, default: None
Current figure. It can be None if *units* are 'dots'.
x, y : float, default: 0.0
The offset to apply.
units : {'inches', 'points', 'dots'}, default: 'inches'
Units of the offset.
Returns
-------
`Transform` subclass
Transform with applied offset.
"""
_api.check_in_list(['dots', 'points', 'inches'], units=units)
if units == 'dots':
return trans + Affine2D().translate(x, y)
if fig is None:
raise ValueError('For units of inches or points a fig kwarg is needed')
if units == 'points':
x /= 72.0
y /= 72.0
# Default units are 'inches'
return trans + ScaledTranslation(x, y, fig.dpi_scale_trans)
| TransformedPatchPath |
python | cython__cython | docs/examples/userguide/extension_types/extendable_animal.py | {
"start": 15,
"end": 162
} | class ____:
number_of_legs: cython.int
def __cinit__(self, number_of_legs: cython.int):
self.number_of_legs = number_of_legs
| Animal |
python | kamyu104__LeetCode-Solutions | Python/non-overlapping-intervals.py | {
"start": 33,
"end": 473
} | class ____(object):
def eraseOverlapIntervals(self, intervals):
"""
:type intervals: List[List[int]]
:rtype: int
"""
intervals.sort(key=lambda interval: interval[1])
result, right = 0, float("-inf")
for l, r in intervals:
if l < right:
result += 1
else:
right = r
return result
# Time: O(nlogn)
# Space: O(1)
| Solution |
python | sphinx-doc__sphinx | sphinx/domains/cpp/__init__.py | {
"start": 18052,
"end": 19373
} | class ____(SphinxDirective):
"""This directive is just to tell Sphinx that we're documenting stuff in
namespace foo.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec: ClassVar[OptionSpec] = {}
def run(self) -> list[Node]:
root_symbol = self.env.domaindata['cpp']['root_symbol']
if self.arguments[0].strip() in {'NULL', '0', 'nullptr'}:
symbol = root_symbol
stack: list[Symbol] = []
else:
parser = DefinitionParser(
self.arguments[0], location=self.get_location(), config=self.config
)
try:
ast = parser.parse_namespace_object()
parser.assert_end()
except DefinitionError as e:
logger.warning(e, location=self.get_location())
name = _make_phony_error_name()
ast = ASTNamespace(name, None)
symbol = root_symbol.add_name(ast.nestedName, ast.templatePrefix)
stack = [symbol]
self.env.current_document.cpp_parent_symbol = symbol
self.env.current_document.cpp_namespace_stack = stack
self.env.ref_context['cpp:parent_key'] = symbol.get_lookup_key()
return []
| CPPNamespaceObject |
python | pyqtgraph__pyqtgraph | pyqtgraph/widgets/TreeWidget.py | {
"start": 13168,
"end": 14599
} | class ____(object):
"""Wrapper around a TreeWidget's invisible root item that calls
TreeWidget.informTreeWidgetChange when child items are added/removed.
"""
def __init__(self, item):
self._real_item = item
def addChild(self, child):
self._real_item.addChild(child)
TreeWidget.informTreeWidgetChange(child)
def addChildren(self, childs):
self._real_item.addChildren(childs)
for child in childs:
TreeWidget.informTreeWidgetChange(child)
def insertChild(self, index, child):
self._real_item.insertChild(index, child)
TreeWidget.informTreeWidgetChange(child)
def insertChildren(self, index, childs):
self._real_item.addChildren(index, childs)
for child in childs:
TreeWidget.informTreeWidgetChange(child)
def removeChild(self, child):
self._real_item.removeChild(child)
TreeWidget.informTreeWidgetChange(child)
def takeChild(self, index):
child = self._real_item.takeChild(index)
TreeWidget.informTreeWidgetChange(child)
return child
def takeChildren(self):
childs = self._real_item.takeChildren()
for child in childs:
TreeWidget.informTreeWidgetChange(child)
return childs
def __getattr__(self, attr):
return getattr(self._real_item, attr)
| InvisibleRootItem |
python | getsentry__sentry | tests/sentry/users/models/test_userrole.py | {
"start": 234,
"end": 529
} | class ____(TestCase):
def setUp(self) -> None:
manage_default_super_admin_role()
def test_creates_super_admin_role(self) -> None:
role = UserRole.objects.get(name="Super Admin")
assert sorted(role.permissions) == sorted(settings.SENTRY_USER_PERMISSIONS)
| UserRoleTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.