language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
Textualize__textual
|
tests/test_widget_removing.py
|
{
"start": 4803,
"end": 8442
}
|
class ____(App):
def compose(self) -> ComposeResult:
yield Button("ABC")
yield Label("Outside of vertical.")
with Vertical():
for index in range(5):
yield Label(str(index))
async def test_widget_remove_children_container():
app = ExampleApp()
async with app.run_test():
container = app.query_one(Vertical)
# 6 labels in total, with 5 of them inside the container.
assert len(app.query(Label)) == 6
assert len(container.children) == 5
await container.remove_children()
# The labels inside the container are gone, and the 1 outside remains.
assert len(app.query(Label)) == 1
assert len(container.children) == 0
async def test_widget_remove_children_with_star_selector():
app = ExampleApp()
async with app.run_test():
container = app.query_one(Vertical)
# 6 labels in total, with 5 of them inside the container.
assert len(app.query(Label)) == 6
assert len(container.children) == 5
await container.remove_children("*")
# The labels inside the container are gone, and the 1 outside remains.
assert len(app.query(Label)) == 1
assert len(container.children) == 0
async def test_widget_remove_children_with_string_selector():
app = ExampleApp()
async with app.run_test():
container = app.query_one(Vertical)
# 6 labels in total, with 5 of them inside the container.
assert len(app.query(Label)) == 6
assert len(container.children) == 5
await app.screen.remove_children("Label")
# Only the Screen > Label widget is gone, everything else remains.
assert len(app.query(Button)) == 1
assert len(app.query(Vertical)) == 1
assert len(app.query(Label)) == 5
async def test_widget_remove_children_with_type_selector():
app = ExampleApp()
async with app.run_test():
assert len(app.query(Button)) == 1 # Sanity check.
await app.screen.remove_children(Button)
assert len(app.query(Button)) == 0
async def test_widget_remove_children_with_selector_does_not_leak():
app = ExampleApp()
async with app.run_test():
container = app.query_one(Vertical)
# 6 labels in total, with 5 of them inside the container.
assert len(app.query(Label)) == 6
assert len(container.children) == 5
await container.remove_children("Label")
# The labels inside the container are gone, and the 1 outside remains.
assert len(app.query(Label)) == 1
assert len(container.children) == 0
async def test_widget_remove_children_no_children():
app = ExampleApp()
async with app.run_test():
button = app.query_one(Button)
count_before = len(app.query("*"))
await button.remove_children()
count_after = len(app.query("*"))
assert len(app.query(Button)) == 1 # The button still remains.
assert (
count_before == count_after
) # No widgets have been removed, since Button has no children.
async def test_widget_remove_children_no_children_match_selector():
app = ExampleApp()
async with app.run_test():
container = app.query_one(Vertical)
assert len(container.query("Button")) == 0 # Sanity check.
count_before = len(app.query("*"))
container_children_before = list(container.children)
await container.remove_children("Button")
assert count_before == len(app.query("*"))
assert container_children_before == list(container.children)
|
ExampleApp
|
python
|
apache__airflow
|
airflow-core/tests/unit/lineage/test_hook.py
|
{
"start": 33673,
"end": 34362
}
|
class ____(plugins_manager.AirflowPlugin):
name = "FakePluginHavingHookLineageCollector"
hook_lineage_readers = [HookLineageReader]
@pytest.mark.parametrize(
("has_readers", "expected_class"),
[
(True, HookLineageCollector),
(False, NoOpCollector),
],
)
def test_get_hook_lineage_collector(has_readers, expected_class):
# reset cached instance
hook.get_hook_lineage_collector.cache_clear()
plugins = [FakePlugin()] if has_readers else []
with mock_plugin_manager(plugins=plugins):
assert isinstance(get_hook_lineage_collector(), expected_class)
assert get_hook_lineage_collector() is get_hook_lineage_collector()
|
FakePlugin
|
python
|
ray-project__ray
|
release/ray_release/tests/test_config.py
|
{
"start": 8606,
"end": 13819
}
|
class ____:
def test_does_not_mutate_original(self):
test_definition = {"name": "test-{{arg}}"}
substituted = _substitute_variable(test_definition, "arg", "1")
assert substituted is not test_definition
assert test_definition == {"name": "test-{{arg}}"}
def test_substitute_variable_in_string(self):
test_definition = {"name": "test-{{arg}}"}
substituted = _substitute_variable(test_definition, "arg", "1")
assert substituted == {"name": "test-1"}
def test_substitute_variable_in_list(self):
test_definition = {"items": ["item-{{arg}}"]}
substituted = _substitute_variable(test_definition, "arg", "1")
assert substituted == {"items": ["item-1"]}
def test_substitute_variable_in_dict(self):
test_definition = {"outer": {"inner": "item-{{arg}}"}}
substituted = _substitute_variable(test_definition, "arg", "1")
assert substituted == {"outer": {"inner": "item-1"}}
def test_schema_validation():
test = VALID_TEST.copy()
schema = load_schema_file()
assert not validate_test(Test(**test), schema)
# Remove some optional arguments
del test["alert"]
del test["python"]
del test["run"]["wait_for_nodes"]
del test["cluster"]["autosuspend_mins"]
assert not validate_test(Test(**test), schema)
# Add some faulty arguments
# Faulty frequency
invalid_test = Test(**copy.deepcopy(VALID_TEST))
invalid_test["frequency"] = "invalid"
assert validate_test(invalid_test, schema)
# Faulty job type
invalid_test = Test(**copy.deepcopy(VALID_TEST))
invalid_test["run"]["type"] = "invalid"
assert validate_test(invalid_test, schema)
# Faulty file manager type
invalid_test = Test(**copy.deepcopy(VALID_TEST))
invalid_test["run"]["file_manager"] = "invalid"
assert validate_test(invalid_test, schema)
# Faulty smoke test
invalid_test = Test(**copy.deepcopy(VALID_TEST))
del invalid_test["smoke_test"]["frequency"]
assert validate_test(invalid_test, schema)
# Faulty Python version
invalid_test = Test(**copy.deepcopy(VALID_TEST))
invalid_test["python"] = "invalid"
assert validate_test(invalid_test, schema)
# Faulty BYOD type
invalid_test = Test(**copy.deepcopy(VALID_TEST))
invalid_test["cluster"]["byod"]["type"] = "invalid"
assert validate_test(invalid_test, schema)
# Faulty BYOD and Python version match
invalid_test = Test(**copy.deepcopy(VALID_TEST))
invalid_test["cluster"]["byod"]["type"] = "gpu"
invalid_test["python"] = "3.11"
assert validate_test(invalid_test, schema)
def test_compute_config_invalid_ebs():
compute_config = {
"aws": {
"BlockDeviceMappings": [
{
"DeviceName": "/dev/sda1",
"Ebs": {
"VolumeSize": 1000,
},
}
]
}
}
assert validate_cluster_compute(compute_config)
compute_config["aws"]["BlockDeviceMappings"][0]["Ebs"][
"DeleteOnTermination"
] = False
assert validate_cluster_compute(compute_config)
compute_config["aws"]["BlockDeviceMappings"][0]["Ebs"]["DeleteOnTermination"] = True
assert not validate_cluster_compute(compute_config)
compute_config["head_node_type"] = {}
compute_config["head_node_type"]["aws_advanced_configurations"] = {
"BlockDeviceMappings": [
{
"DeviceName": "/dev/sda1",
"Ebs": {
"VolumeSize": 1000,
},
}
]
}
assert validate_cluster_compute(compute_config)
compute_config["head_node_type"]["aws_advanced_configurations"][
"BlockDeviceMappings"
][0]["Ebs"]["DeleteOnTermination"] = False
assert validate_cluster_compute(compute_config)
compute_config["head_node_type"]["aws_advanced_configurations"][
"BlockDeviceMappings"
][0]["Ebs"]["DeleteOnTermination"] = True
assert not validate_cluster_compute(compute_config)
compute_config["worker_node_types"] = [{}]
compute_config["worker_node_types"][0]["aws_advanced_configurations"] = {
"BlockDeviceMappings": [
{
"DeviceName": "/dev/sda1",
"Ebs": {
"VolumeSize": 1000,
},
}
]
}
assert validate_cluster_compute(compute_config)
compute_config["worker_node_types"][0]["aws_advanced_configurations"][
"BlockDeviceMappings"
][0]["Ebs"]["DeleteOnTermination"] = False
assert validate_cluster_compute(compute_config)
compute_config["worker_node_types"][0]["aws_advanced_configurations"][
"BlockDeviceMappings"
][0]["Ebs"]["DeleteOnTermination"] = True
assert not validate_cluster_compute(compute_config)
def test_load_and_validate_test_collection_file():
tests = read_and_validate_release_test_collection(_TEST_COLLECTION_FILES)
assert [test for test in tests if test.get_name() == "test_name"]
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
|
TestSubstituteVariable
|
python
|
django__django
|
django/db/models/functions/math.py
|
{
"start": 4063,
"end": 4416
}
|
class ____(NumericOutputFieldMixin, Transform):
function = "RADIANS"
lookup_name = "radians"
def as_oracle(self, compiler, connection, **extra_context):
return super().as_sql(
compiler,
connection,
template="((%%(expressions)s) * %s / 180)" % math.pi,
**extra_context,
)
|
Radians
|
python
|
ethereum__web3.py
|
web3/_utils/filters.py
|
{
"start": 6179,
"end": 7633
}
|
class ____(Filter):
data_filter_set = None
data_filter_set_regex = None
data_filter_set_function = None
log_entry_formatter = None
filter_params: FilterParams = None
builder: EventFilterBuilder = None
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.log_entry_formatter = kwargs.pop(
"log_entry_formatter",
self.log_entry_formatter,
)
if "data_filter_set" in kwargs:
self.set_data_filters(kwargs.pop("data_filter_set"))
super().__init__(*args, **kwargs)
def format_entry(self, entry: LogReceipt) -> LogReceipt:
if self.log_entry_formatter:
return self.log_entry_formatter(entry)
return entry
def set_data_filters(
self, data_filter_set: Collection[tuple[TypeStr, Any]]
) -> None:
"""
Sets the data filters (non indexed argument filters)
Expects a set of tuples with the type and value, e.g.:
(('uint256', [12345, 54321]), ('string', ('a-single-string',)))
"""
self.data_filter_set = data_filter_set
if any(data_filter_set):
self.data_filter_set_function = match_fn(
self.eth_module.codec, data_filter_set
)
def is_valid_entry(self, entry: LogReceipt) -> bool:
if not self.data_filter_set:
return True
return bool(self.data_filter_set_function(entry["data"]))
|
LogFilter
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_attributes.py
|
{
"start": 1491,
"end": 5307
}
|
class ____(fixtures.MappedTest):
def _scalar_obj_fixture(self):
class A:
pass
class B:
pass
instrumentation.register_class(A)
instrumentation.register_class(B)
_register_attribute(A, "b", uselist=False, useobject=True)
return A, B
def _collection_obj_fixture(self):
class A:
pass
class B:
pass
instrumentation.register_class(A)
instrumentation.register_class(B)
_register_attribute(A, "b", uselist=True, useobject=True)
return A, B
def test_scalar_obj_remove_invalid(self):
A, B = self._scalar_obj_fixture()
a1 = A()
b1 = B()
b2 = B()
A.b.impl.append(
attributes.instance_state(a1),
attributes.instance_dict(a1),
b1,
None,
)
assert a1.b is b1
assert_raises_message(
ValueError,
"Object <B at .*?> not "
"associated with <A at .*?> on attribute 'b'",
A.b.impl.remove,
attributes.instance_state(a1),
attributes.instance_dict(a1),
b2,
None,
)
def test_scalar_obj_pop_invalid(self):
A, B = self._scalar_obj_fixture()
a1 = A()
b1 = B()
b2 = B()
A.b.impl.append(
attributes.instance_state(a1),
attributes.instance_dict(a1),
b1,
None,
)
assert a1.b is b1
A.b.impl.pop(
attributes.instance_state(a1),
attributes.instance_dict(a1),
b2,
None,
)
assert a1.b is b1
def test_scalar_obj_pop_valid(self):
A, B = self._scalar_obj_fixture()
a1 = A()
b1 = B()
A.b.impl.append(
attributes.instance_state(a1),
attributes.instance_dict(a1),
b1,
None,
)
assert a1.b is b1
A.b.impl.pop(
attributes.instance_state(a1),
attributes.instance_dict(a1),
b1,
None,
)
assert a1.b is None
def test_collection_obj_remove_invalid(self):
A, B = self._collection_obj_fixture()
a1 = A()
b1 = B()
b2 = B()
A.b.impl.append(
attributes.instance_state(a1),
attributes.instance_dict(a1),
b1,
None,
)
assert a1.b == [b1]
assert_raises_message(
ValueError,
r"list.remove\(.*?\): .* not in list",
A.b.impl.remove,
attributes.instance_state(a1),
attributes.instance_dict(a1),
b2,
None,
)
def test_collection_obj_pop_invalid(self):
A, B = self._collection_obj_fixture()
a1 = A()
b1 = B()
b2 = B()
A.b.impl.append(
attributes.instance_state(a1),
attributes.instance_dict(a1),
b1,
None,
)
assert a1.b == [b1]
A.b.impl.pop(
attributes.instance_state(a1),
attributes.instance_dict(a1),
b2,
None,
)
assert a1.b == [b1]
def test_collection_obj_pop_valid(self):
A, B = self._collection_obj_fixture()
a1 = A()
b1 = B()
A.b.impl.append(
attributes.instance_state(a1),
attributes.instance_dict(a1),
b1,
None,
)
assert a1.b == [b1]
A.b.impl.pop(
attributes.instance_state(a1),
attributes.instance_dict(a1),
b1,
None,
)
assert a1.b == []
|
AttributeImplAPITest
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pylint/unnecessary_dunder_call.py
|
{
"start": 1346,
"end": 2621
}
|
class ____:
def __init__(self, v):
self.v = v
def __add__(self, other):
self.v += other
return self
def get_v(self):
return self.v
foo = Foo(1)
foo.__add__(2).get_v() # PLC2801
# Lambda expressions
blah = lambda: a.__add__(1) # PLC2801
# If expressions
print(a.__add__(1) if a > 0 else a.__sub__(1)) # PLC2801
# Dict/Set/List/Tuple
print({"a": a.__add__(1)}) # PLC2801
print({a.__add__(1)}) # PLC2801
print([a.__add__(1)]) # PLC2801
print((a.__add__(1),)) # PLC2801
# Comprehension variants
print({i: i.__add__(1) for i in range(5)}) # PLC2801
print({i.__add__(1) for i in range(5)}) # PLC2801
print([i.__add__(1) for i in range(5)]) # PLC2801
print((i.__add__(1) for i in range(5))) # PLC2801
# Generators
gen = (i.__add__(1) for i in range(5)) # PLC2801
print(next(gen))
# Subscripts
print({"a": a.__add__(1)}["a"]) # PLC2801
# https://github.com/astral-sh/ruff/issues/15745
print("x".__add__("y")[0]) # PLC2801
# Starred
print(*[a.__add__(1)]) # PLC2801
list1 = [1, 2, 3]
list2 = [4, 5, 6]
print([*list1.__add__(list2)]) # PLC2801
# Slices
print([a.__add__(1), a.__sub__(1)][0:1]) # PLC2801
# Attribute access
# https://github.com/astral-sh/ruff/issues/15745
print(1j.__add__(1.0).real) # PLC2801
|
Foo
|
python
|
pypa__pip
|
src/pip/_vendor/pkg_resources/__init__.py
|
{
"start": 115429,
"end": 115555
}
|
class ____(_packaging_requirements.InvalidRequirement):
"Compatibility wrapper for InvalidRequirement"
|
RequirementParseError
|
python
|
allegroai__clearml
|
clearml/backend_api/session/jsonmodels/validators.py
|
{
"start": 5871,
"end": 6393
}
|
class ____(object):
"""Validator for enums."""
def __init__(self, *choices: Any) -> None:
"""Init.
:param [] choices: Valid choices for the field.
"""
self.choices = list(choices)
def validate(self, value: Any) -> None:
if value not in self.choices:
tpl = "Value '{val}' is not a valid choice."
raise ValidationError(tpl.format(val=value))
def modify_schema(self, field_schema: dict) -> None:
field_schema["enum"] = self.choices
|
Enum
|
python
|
doocs__leetcode
|
solution/1700-1799/1775.Equal Sum Arrays With Minimum Number of Operations/Solution2.py
|
{
"start": 0,
"end": 532
}
|
class ____:
def minOperations(self, nums1: List[int], nums2: List[int]) -> int:
s1, s2 = sum(nums1), sum(nums2)
if s1 == s2:
return 0
if s1 > s2:
return self.minOperations(nums2, nums1)
cnt = Counter([6 - v for v in nums1] + [v - 1 for v in nums2])
d = s2 - s1
ans = 0
for i in range(5, 0, -1):
while cnt[i] and d > 0:
d -= i
cnt[i] -= 1
ans += 1
return ans if d <= 0 else -1
|
Solution
|
python
|
ray-project__ray
|
rllib/examples/centralized_critic.py
|
{
"start": 8236,
"end": 8918
}
|
class ____(CentralizedValueMixin, PPOTorchPolicy):
def __init__(self, observation_space, action_space, config):
PPOTorchPolicy.__init__(self, observation_space, action_space, config)
CentralizedValueMixin.__init__(self)
@override(PPOTorchPolicy)
def loss(self, model, dist_class, train_batch):
return loss_with_central_critic(self, super(), model, dist_class, train_batch)
@override(PPOTorchPolicy)
def postprocess_trajectory(
self, sample_batch, other_agent_batches=None, episode=None
):
return centralized_critic_postprocessing(
self, sample_batch, other_agent_batches, episode
)
|
CCPPOTorchPolicy
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/dviread.py
|
{
"start": 24722,
"end": 32196
}
|
class ____:
"""
Encapsulation of a font that a DVI file can refer to.
This class holds a font's texname and size, supports comparison,
and knows the widths of glyphs in the same units as the AFM file.
There are also internal attributes (for use by dviread.py) that
are *not* used for comparison.
The size is in Adobe points (converted from TeX points).
Parameters
----------
scale : float
Factor by which the font is scaled from its natural size.
metrics : Tfm | TtfMetrics
TeX font metrics for this font
texname : bytes
Name of the font as used internally in the DVI file, as an ASCII
bytestring. This is usually very different from any external font
names; `PsfontsMap` can be used to find the external name of the font.
vf : Vf
A TeX "virtual font" file, or None if this font is not virtual.
Attributes
----------
texname : bytes
fname : str
Compatibility shim so that DviFont can be used with
``_backend_pdf_ps.CharacterTracker``; not a real filename.
size : float
Size of the font in Adobe points, converted from the slightly
smaller TeX points.
"""
def __init__(self, scale, metrics, texname, vf):
_api.check_isinstance(bytes, texname=texname)
self._scale = scale
self._metrics = metrics
self.texname = texname
self._vf = vf
self._path = None
self._encoding = None
@classmethod
def from_luatex(cls, scale, texname):
path_b, sep, rest = texname[1:].rpartition(b"]")
if not (texname.startswith(b"[") and sep and rest[:1] in [b"", b":"]):
raise ValueError(f"Invalid modern font name: {texname}")
# utf8 on Windows, not utf16!
path = path_b.decode("utf8") if os.name == "nt" else os.fsdecode(path_b)
subfont = 0
effects = {}
if rest[1:]:
for kv in rest[1:].decode("ascii").split(";"):
key, val = kv.split("=", 1)
if key == "index":
subfont = val
elif key in ["embolden", "slant", "extend"]:
effects[key] = int(val) / 65536
else:
_log.warning("Ignoring invalid key-value pair: %r", kv)
metrics = TtfMetrics(path)
font = cls(scale, metrics, texname, vf=None)
font._path = Path(path)
font.subfont = subfont
font.effects = effects
return font
@classmethod
def from_xetex(cls, scale, texname, subfont, effects):
# utf8 on Windows, not utf16!
path = texname.decode("utf8") if os.name == "nt" else os.fsdecode(texname)
metrics = TtfMetrics(path)
font = cls(scale, metrics, b"[" + texname + b"]", vf=None)
font._path = Path(path)
font.subfont = subfont
font.effects = effects
return font
size = property(lambda self: self._scale * (72.0 / (72.27 * 2**16)))
widths = _api.deprecated("3.11")(property(lambda self: [
(1000 * self._tfm.width.get(char, 0)) >> 20
for char in range(max(self._tfm.width, default=-1) + 1)]))
@property
def fname(self):
"""A fake filename"""
return self.texname.decode('latin-1')
def _get_fontmap(self, string):
"""Get the mapping from characters to the font that includes them.
Each value maps to self; there is no fallback mechanism for DviFont.
"""
return {char: self for char in string}
def __eq__(self, other):
return (type(self) is type(other)
and self.texname == other.texname and self.size == other.size)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return f"<{type(self).__name__}: {self.texname}>"
def _width_of(self, char):
"""Width of char in dvi units."""
metrics = self._metrics.get_metrics(char)
if metrics is None:
_log.debug('No width for char %d in font %s.', char, self.texname)
return 0
return _mul1220(metrics.tex_width, self._scale)
def _height_depth_of(self, char):
"""Height and depth of char in dvi units."""
metrics = self._metrics.get_metrics(char)
if metrics is None:
_log.debug('No metrics for char %d in font %s', char, self.texname)
return [0, 0]
hd = [
_mul1220(metrics.tex_height, self._scale),
_mul1220(metrics.tex_depth, self._scale),
]
# cmsyXX (symbols font) glyph 0 ("minus") has a nonzero descent
# so that TeX aligns equations properly
# (https://tex.stackexchange.com/q/526103/)
# but we actually care about the rasterization depth to align
# the dvipng-generated images.
if re.match(br'^cmsy\d+$', self.texname) and char == 0:
hd[-1] = 0
return hd
def resolve_path(self):
if self._path is None:
fontmap = PsfontsMap(find_tex_file("pdftex.map"))
try:
psfont = fontmap[self.texname]
except LookupError as exc:
try:
find_tex_file(f"{self.texname.decode('ascii')}.mf")
except FileNotFoundError:
raise exc from None
else:
self._path = Path(find_tex_file(
f"{self.texname.decode('ascii')}.600pk"))
else:
if psfont.filename is None:
raise ValueError("No usable font file found for {} ({}); "
"the font may lack a Type-1 version"
.format(psfont.psname.decode("ascii"),
psfont.texname.decode("ascii")))
self._path = Path(psfont.filename)
return self._path
@cached_property
def subfont(self):
return 0
@cached_property
def effects(self):
if self.resolve_path().match("*.600pk"):
return {}
return PsfontsMap(find_tex_file("pdftex.map"))[self.texname].effects
def _index_dvi_to_freetype(self, idx):
"""Convert dvi glyph indices to FreeType ones."""
# Glyphs indices stored in the dvi file map to FreeType glyph indices
# (i.e., which can be passed to FT_Load_Glyph) in various ways:
# - for xetex & luatex "native fonts", dvi indices are directly equal
# to FreeType indices.
# - if pdftex.map specifies an ".enc" file for the font, that file maps
# dvi indices to Adobe glyph names, which can then be converted to
# FreeType glyph indices with FT_Get_Name_Index.
# - if no ".enc" file is specified, then the font must be a Type 1
# font, and dvi indices directly index into the font's CharStrings
# vector.
if self.texname.startswith(b"["):
return idx
if self._encoding is None:
face = font_manager.get_font(self.resolve_path())
psfont = PsfontsMap(find_tex_file("pdftex.map"))[self.texname]
if psfont.encoding:
self._encoding = [face.get_name_index(name)
for name in _parse_enc(psfont.encoding)]
else:
self._encoding = face._get_type1_encoding_vector()
return self._encoding[idx]
|
DviFont
|
python
|
numba__numba
|
numba/core/datamodel/models.py
|
{
"start": 32687,
"end": 33315
}
|
class ____(StructModel):
def __init__(self, dmm, fe_type, need_indices):
assert fe_type.array_type.layout == 'C'
array_type = fe_type.array_type
dtype = array_type.dtype
ndim = array_type.ndim
members = [('array', array_type),
('stride', types.intp),
('index', types.EphemeralPointer(types.intp)),
]
if need_indices:
# For ndenumerate()
members.append(('indices', types.EphemeralArray(types.intp, ndim)))
super(CContiguousFlatIter, self).__init__(dmm, fe_type, members)
|
CContiguousFlatIter
|
python
|
openai__openai-python
|
src/openai/resources/chat/completions/completions.py
|
{
"start": 161026,
"end": 161991
}
|
class ____:
def __init__(self, completions: AsyncCompletions) -> None:
self._completions = completions
self.parse = _legacy_response.async_to_raw_response_wrapper(
completions.parse,
)
self.create = _legacy_response.async_to_raw_response_wrapper(
completions.create,
)
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
completions.retrieve,
)
self.update = _legacy_response.async_to_raw_response_wrapper(
completions.update,
)
self.list = _legacy_response.async_to_raw_response_wrapper(
completions.list,
)
self.delete = _legacy_response.async_to_raw_response_wrapper(
completions.delete,
)
@cached_property
def messages(self) -> AsyncMessagesWithRawResponse:
return AsyncMessagesWithRawResponse(self._completions.messages)
|
AsyncCompletionsWithRawResponse
|
python
|
hyperopt__hyperopt
|
hyperopt/tests/unit/test_rdists.py
|
{
"start": 456,
"end": 1733
}
|
class ____(unittest.TestCase):
def test_cdf_logcdf(self):
check_cdf_logcdf(loguniform_gen(0, 1), (0, 1), "")
check_cdf_logcdf(loguniform_gen(0, 1), (-5, 5), "")
def test_cdf_ppf(self):
check_cdf_ppf(loguniform_gen(0, 1), (0, 1), "")
check_cdf_ppf(loguniform_gen(-2, 1), (-5, 5), "")
def test_pdf_logpdf(self):
check_pdf_logpdf(loguniform_gen(0, 1), (0, 1), "")
check_pdf_logpdf(loguniform_gen(low=-4, high=-0.5), (-2, 1), "")
def test_pdf(self):
check_pdf(loguniform_gen(0, 1), (0, 1), "")
check_pdf(loguniform_gen(low=-4, high=-2), (-3, 2), "")
def test_distribution_rvs(self):
alpha = 0.01
loc = 0
scale = 1
arg = (loc, scale)
distfn = loguniform_gen(0, 1)
if packaging.version.Version(
importlib.metadata.version("scipy")
) >= packaging.version.Version("1.12.0"):
D, pval = stats.kstest(distfn.rvs(), distfn.cdf, args=arg, N=1000)
else:
D, pval = stats.kstest(distfn.rvs, distfn.cdf, args=arg, N=1000)
if pval < alpha:
npt.assert_(
pval > alpha,
f"D = {D:f}; pval = {pval:f}; alpha = {alpha:f}; args={arg}",
)
|
TestLogUniform
|
python
|
doocs__leetcode
|
solution/1200-1299/1260.Shift 2D Grid/Solution.py
|
{
"start": 0,
"end": 360
}
|
class ____:
def shiftGrid(self, grid: List[List[int]], k: int) -> List[List[int]]:
m, n = len(grid), len(grid[0])
ans = [[0] * n for _ in range(m)]
for i, row in enumerate(grid):
for j, v in enumerate(row):
x, y = divmod((i * n + j + k) % (m * n), n)
ans[x][y] = v
return ans
|
Solution
|
python
|
numba__numba
|
numba/tests/test_num_threads.py
|
{
"start": 534,
"end": 21433
}
|
class ____(TestCase):
_numba_parallel_test_ = False
def setUp(self):
# Make sure the num_threads is set to the max. This also makes sure
# the threads are launched.
set_num_threads(config.NUMBA_NUM_THREADS)
def check_mask(self, expected, result):
# There's no guarantee that TBB will use a full mask worth of
# threads if it deems it inefficient to do so
if threading_layer() == 'tbb':
self.assertTrue(np.all(result <= expected))
elif threading_layer() in ('omp', 'workqueue'):
np.testing.assert_equal(expected, result)
else:
assert 0, 'unreachable'
@skip_parfors_unsupported
def test_set_num_threads_type(self):
@njit
def foo():
set_num_threads('wrong_type')
expected = "The number of threads specified must be an integer"
for fn, errty in ((foo, TypingError), (foo.py_func, TypeError)):
with self.assertRaises(errty) as raises:
fn()
self.assertIn(expected, str(raises.exception))
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
def _test_set_num_threads_basic(self):
max_threads = config.NUMBA_NUM_THREADS
self.assertEqual(get_num_threads(), max_threads)
set_num_threads(2)
self.assertEqual(get_num_threads(), 2)
set_num_threads(max_threads)
self.assertEqual(get_num_threads(), max_threads)
with self.assertRaises(ValueError):
set_num_threads(0)
with self.assertRaises(ValueError):
set_num_threads(max_threads + 1)
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
def _test_set_num_threads_basic_jit(self):
max_threads = config.NUMBA_NUM_THREADS
@njit
def get_n():
return get_num_threads()
self.assertEqual(get_n(), max_threads)
set_num_threads(2)
self.assertEqual(get_n(), 2)
set_num_threads(max_threads)
self.assertEqual(get_n(), max_threads)
@njit
def set_get_n(n):
set_num_threads(n)
return get_num_threads()
self.assertEqual(set_get_n(2), 2)
self.assertEqual(set_get_n(max_threads), max_threads)
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
def _test_set_num_threads_basic_guvectorize(self):
max_threads = config.NUMBA_NUM_THREADS
@guvectorize(['void(int64[:])'],
'(n)',
nopython=True,
target='parallel')
def get_n(x):
x[:] = get_num_threads()
x = np.zeros((5000000,), dtype=np.int64)
get_n(x)
np.testing.assert_equal(x, max_threads)
set_num_threads(2)
x = np.zeros((5000000,), dtype=np.int64)
get_n(x)
np.testing.assert_equal(x, 2)
set_num_threads(max_threads)
x = np.zeros((5000000,), dtype=np.int64)
get_n(x)
np.testing.assert_equal(x, max_threads)
@guvectorize(['void(int64[:])'],
'(n)',
nopython=True,
target='parallel')
def set_get_n(n):
set_num_threads(n[0])
n[:] = get_num_threads()
x = np.zeros((5000000,), dtype=np.int64)
x[0] = 2
set_get_n(x)
np.testing.assert_equal(x, 2)
x = np.zeros((5000000,), dtype=np.int64)
x[0] = max_threads
set_get_n(x)
np.testing.assert_equal(x, max_threads)
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
def _test_set_num_threads_outside_jit(self):
# Test set_num_threads outside a jitted function
set_num_threads(2)
@njit(parallel=True)
def test_func():
x = 5
buf = np.empty((x,))
for i in prange(x):
buf[i] = get_num_threads()
return buf
@guvectorize(['void(int64[:])'],
'(n)',
nopython=True,
target='parallel')
def test_gufunc(x):
x[:] = get_num_threads()
out = test_func()
np.testing.assert_equal(out, 2)
x = np.zeros((5000000,), dtype=np.int64)
test_gufunc(x)
np.testing.assert_equal(x, 2)
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
def _test_set_num_threads_inside_jit(self):
# Test set_num_threads inside a jitted function
@njit(parallel=True)
def test_func(nthreads):
x = 5
buf = np.empty((x,))
set_num_threads(nthreads)
for i in prange(x):
buf[i] = get_num_threads()
return buf
mask = 2
out = test_func(mask)
np.testing.assert_equal(out, mask)
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
def _test_set_num_threads_inside_guvectorize(self):
# Test set_num_threads inside a jitted guvectorize function
@guvectorize(['void(int64[:])'],
'(n)',
nopython=True,
target='parallel')
def test_func(x):
set_num_threads(x[0])
x[:] = get_num_threads()
x = np.zeros((5000000,), dtype=np.int64)
mask = 2
x[0] = mask
test_func(x)
np.testing.assert_equal(x, mask)
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
def _test_get_num_threads_truth_outside_jit(self):
for mask in range(2, min(6, config.NUMBA_NUM_THREADS + 1)):
set_num_threads(mask)
# a lot of work, hopefully will trigger "mask" count of threads to
# join the parallel region (for those backends with dynamic threads)
@njit(parallel=True)
def test_func():
x = 5000000
buf = np.empty((x,))
for i in prange(x):
buf[i] = get_thread_id()
return len(np.unique(buf)), get_num_threads()
out = test_func()
self.check_mask((mask, mask), out)
@guvectorize(['void(int64[:], int64[:])'],
'(n), (m)',
nopython=True,
target='parallel')
def test_gufunc(x, out):
x[:] = get_thread_id()
out[0] = get_num_threads()
# Reshape to force parallelism
x = np.full((5000000,), -1, dtype=np.int64).reshape((100, 50000))
out = np.zeros((1,), dtype=np.int64)
test_gufunc(x, out)
self.check_mask(mask, out)
self.check_mask(mask, len(np.unique(x)))
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
def _test_get_num_threads_truth_inside_jit(self):
for mask in range(2, min(6, config.NUMBA_NUM_THREADS + 1)):
# a lot of work, hopefully will trigger "mask" count of threads to
# join the parallel region (for those backends with dynamic threads)
@njit(parallel=True)
def test_func():
set_num_threads(mask)
x = 5000000
buf = np.empty((x,))
for i in prange(x):
buf[i] = get_thread_id()
return len(np.unique(buf)), get_num_threads()
out = test_func()
self.check_mask((mask, mask), out)
@guvectorize(['void(int64[:], int64[:])'],
'(n), (m)',
nopython=True,
target='parallel')
def test_gufunc(x, out):
set_num_threads(mask)
x[:] = get_thread_id()
out[0] = get_num_threads()
# Reshape to force parallelism
x = np.full((5000000,), -1, dtype=np.int64).reshape((100, 50000))
out = np.zeros((1,), dtype=np.int64)
test_gufunc(x, out)
self.check_mask(mask, out)
self.check_mask(mask, len(np.unique(x)))
# this test can only run on OpenMP (providing OMP_MAX_ACTIVE_LEVELS is not
# set or >= 2) and TBB backends
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
def _test_nested_parallelism_1(self):
if threading_layer() == 'workqueue':
self.skipTest("workqueue is not threadsafe")
# check that get_num_threads is ok in nesting
mask = config.NUMBA_NUM_THREADS - 1
N = config.NUMBA_NUM_THREADS
M = 2 * config.NUMBA_NUM_THREADS
@njit(parallel=True)
def child_func(buf, fid):
M, N = buf.shape
for i in prange(N):
buf[fid, i] = get_num_threads()
def get_test(test_type):
if test_type == 'njit':
def test_func(nthreads, py_func=False):
@njit(parallel=True)
def _test_func(nthreads):
acc = 0
buf = np.zeros((M, N))
set_num_threads(nthreads)
for i in prange(M):
local_mask = 1 + i % mask
# set threads in parent function
set_num_threads(local_mask)
if local_mask < N:
child_func(buf, local_mask)
acc += get_num_threads()
return acc, buf
if py_func:
return _test_func.py_func(nthreads)
else:
return _test_func(nthreads)
elif test_type == 'guvectorize':
def test_func(nthreads, py_func=False):
def _test_func(acc, buf, local_mask):
set_num_threads(nthreads)
# set threads in parent function
set_num_threads(local_mask[0])
if local_mask[0] < N:
child_func(buf, local_mask[0])
acc[0] += get_num_threads()
buf = np.zeros((M, N), dtype=np.int64)
acc = np.zeros((M, 1), dtype=np.int64)
local_mask = (1 + np.arange(M) % mask).reshape((M, 1))
sig = ['void(int64[:], int64[:, :], int64[:])']
layout = '(p), (n, m), (p)'
if not py_func:
_test_func = guvectorize(sig, layout, nopython=True,
target='parallel')(_test_func)
else:
_test_func = guvectorize(sig, layout,
forceobj=True)(_test_func)
_test_func(acc, buf, local_mask)
return acc, buf
return test_func
for test_type in ['njit', 'guvectorize']:
test_func = get_test(test_type)
got_acc, got_arr = test_func(mask)
exp_acc, exp_arr = test_func(mask, py_func=True)
np.testing.assert_equal(exp_acc, got_acc)
np.testing.assert_equal(exp_arr, got_arr)
# check the maths reconciles, guvectorize does not reduce, njit does
math_acc_exp = 1 + np.arange(M) % mask
if test_type == 'guvectorize':
math_acc = math_acc_exp.reshape((M, 1))
else:
math_acc = np.sum(math_acc_exp)
np.testing.assert_equal(math_acc, got_acc)
math_arr = np.zeros((M, N))
for i in range(1, N):
# there's branches on 1, ..., num_threads - 1
math_arr[i, :] = i
np.testing.assert_equal(math_arr, got_arr)
# this test can only run on OpenMP (providing OMP_MAX_ACTIVE_LEVELS is not
# set or >= 2) and TBB backends
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
def _test_nested_parallelism_2(self):
if threading_layer() == 'workqueue':
self.skipTest("workqueue is not threadsafe")
# check that get_num_threads is ok in nesting
N = config.NUMBA_NUM_THREADS + 1
M = 4 * config.NUMBA_NUM_THREADS + 1
def get_impl(child_type, test_type):
if child_type == 'parallel':
child_dec = njit(parallel=True)
elif child_type == 'njit':
child_dec = njit(parallel=False)
elif child_type == 'none':
def child_dec(x):
return x
@child_dec
def child(buf, fid):
M, N = buf.shape
set_num_threads(fid) # set threads in child function
for i in prange(N):
buf[fid, i] = get_num_threads()
if test_type in ['parallel', 'njit', 'none']:
if test_type == 'parallel':
test_dec = njit(parallel=True)
elif test_type == 'njit':
test_dec = njit(parallel=False)
elif test_type == 'none':
def test_dec(x):
return x
@test_dec
def test_func(nthreads):
buf = np.zeros((M, N))
set_num_threads(nthreads)
for i in prange(M):
local_mask = 1 + i % mask
# when the threads exit the child functions they should
# have a TLS slot value of the local mask as it was set
# in child
if local_mask < config.NUMBA_NUM_THREADS:
child(buf, local_mask)
assert get_num_threads() == local_mask
return buf
else:
if test_type == 'guvectorize':
test_dec = guvectorize(['int64[:,:], int64[:]'],
'(n, m), (k)', nopython=True,
target='parallel')
elif test_type == 'guvectorize-obj':
test_dec = guvectorize(['int64[:,:], int64[:]'],
'(n, m), (k)', forceobj=True)
def test_func(nthreads):
@test_dec
def _test_func(buf, local_mask):
set_num_threads(nthreads)
# when the threads exit the child functions they should
# have a TLS slot value of the local mask as it was set
# in child
if local_mask[0] < config.NUMBA_NUM_THREADS:
child(buf, local_mask[0])
assert get_num_threads() == local_mask[0]
buf = np.zeros((M, N), dtype=np.int64)
local_mask = (1 + np.arange(M) % mask).reshape((M, 1))
_test_func(buf, local_mask)
return buf
return test_func
mask = config.NUMBA_NUM_THREADS - 1
res_arrays = {}
for test_type in ['parallel', 'njit', 'none',
'guvectorize', 'guvectorize-obj']:
for child_type in ['parallel', 'njit', 'none']:
if child_type == 'none' and test_type != 'none':
continue
set_num_threads(mask)
res_arrays[test_type, child_type] = get_impl(
child_type, test_type)(mask)
py_arr = res_arrays['none', 'none']
for arr in res_arrays.values():
np.testing.assert_equal(arr, py_arr)
# check the maths reconciles
math_arr = np.zeros((M, N))
# there's branches on modulo mask but only NUMBA_NUM_THREADS funcs
for i in range(1, config.NUMBA_NUM_THREADS):
math_arr[i, :] = i
np.testing.assert_equal(math_arr, py_arr)
# this test can only run on OpenMP (providing OMP_MAX_ACTIVE_LEVELS is not
# set or >= 2) and TBB backends
# This test needs at least 3 threads to run, N>=2 for the launch, M>=N+1 for
# the nested function
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 3, "Not enough CPU cores")
def _test_nested_parallelism_3(self):
if threading_layer() == 'workqueue':
self.skipTest("workqueue is not threadsafe")
# check that the right number of threads are present in nesting
# this relies on there being a load of cores present
BIG = 1000000
@njit(parallel=True)
def work(local_nt): # arg is value 3
tid = np.zeros(BIG)
acc = 0
set_num_threads(local_nt) # set to 3 threads
for i in prange(BIG):
acc += 1
tid[i] = get_thread_id()
return acc, np.unique(tid)
@njit(parallel=True)
def test_func_jit(nthreads):
set_num_threads(nthreads) # set to 2 threads
lens = np.zeros(nthreads)
total = 0
for i in prange(nthreads):
my_acc, tids = work(nthreads + 1) # call with value 3
lens[i] = len(tids)
total += my_acc
return total, np.unique(lens)
NT = 2
expected_acc = BIG * NT
expected_thread_count = NT + 1
got_acc, got_tc = test_func_jit(NT)
self.assertEqual(expected_acc, got_acc)
self.check_mask(expected_thread_count, got_tc)
def test_guvectorize(nthreads):
@guvectorize(['int64[:], int64[:]'],
'(n), (n)',
nopython=True,
target='parallel')
def test_func_guvectorize(total, lens):
my_acc, tids = work(nthreads + 1)
lens[0] = len(tids)
total[0] += my_acc
total = np.zeros((nthreads, 1), dtype=np.int64)
lens = np.zeros(nthreads, dtype=np.int64).reshape((nthreads, 1))
test_func_guvectorize(total, lens)
# vectorize does not reduce, so total is summed
return total.sum(), np.unique(lens)
got_acc, got_tc = test_guvectorize(NT)
self.assertEqual(expected_acc, got_acc)
self.check_mask(expected_thread_count, got_tc)
@skip_parfors_unsupported
@unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
@unittest.skipIf(not sys.platform.startswith('linux'), "Linux only")
def _test_threadmask_across_fork(self):
forkctx = multiprocessing.get_context('fork')
@njit
def foo():
return get_num_threads()
def wrap(queue):
queue.put(foo())
mask = 1
self.assertEqual(foo(), config.NUMBA_NUM_THREADS)
set_num_threads(mask)
self.assertEqual(foo(), mask)
shared_queue = forkctx.Queue()
# check TLS slot inheritance in fork
p = forkctx.Process(target=wrap, args=(shared_queue,))
p.start()
p.join()
self.assertEqual(shared_queue.get(), mask)
def tearDown(self):
set_num_threads(config.NUMBA_NUM_THREADS)
@skip_parfors_unsupported
def _test_get_thread_id_not_parallel(self):
python_get_thread_id = get_thread_id()
check_array_size = 8
@njit(parallel=False)
def par_false(size):
njit_par_false_tid = get_thread_id()
res = np.ones(size)
for i in prange(size):
res[i] = get_thread_id()
return njit_par_false_tid, res
@njit(parallel=True)
def par_true(size):
njit_par_true_tid = get_thread_id()
res = np.ones(size)
for i in range(size):
res[i] = get_thread_id()
return njit_par_true_tid, res
self.assertEqual(python_get_thread_id, 0)
njit_par_false_tid, njit_par_false_arr = par_false(check_array_size)
self.assertEqual(njit_par_false_tid, 0)
np.testing.assert_equal(njit_par_false_arr, 0)
njit_par_true_tid, njit_par_true_arr = par_true(check_array_size)
self.assertEqual(njit_par_true_tid, 0)
np.testing.assert_equal(njit_par_true_arr, 0)
|
TestNumThreads
|
python
|
weaviate__weaviate-python-client
|
weaviate/connect/event_loop.py
|
{
"start": 460,
"end": 4227
}
|
class ____:
def __init__(self, loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
self.loop = loop
def start(self) -> None:
if self.loop is not None:
return
self.loop = self.__start_new_event_loop()
_EventLoop.patch_exception_handler(self.loop)
def run_until_complete(
self, f: Callable[P, Coroutine[Any, Any, T]], *args: P.args, **kwargs: P.kwargs
) -> T:
"""This method runs the provided coroutine in a blocking manner by scheduling its execution in an event loop running in a parallel thread.
The result of the coroutine is returned, either when the coroutine completes or raises an exception.
"""
if self.loop is None or self.loop.is_closed():
raise WeaviateClosedClientError()
fut = asyncio.run_coroutine_threadsafe(f(*args, **kwargs), self.loop)
return fut.result()
def schedule(
self, f: Callable[P, Coroutine[Any, Any, T]], *args: P.args, **kwargs: P.kwargs
) -> _Future[T]:
"""This method schedules the provided coroutine for execution in the event loop running in a parallel thread.
The coroutine will be executed asynchronously in the background.
"""
if self.loop is None or self.loop.is_closed():
raise WeaviateClosedClientError()
return cast(_Future[T], asyncio.run_coroutine_threadsafe(f(*args, **kwargs), self.loop))
def shutdown(self) -> None:
if self.loop is None:
return
self.loop.call_soon_threadsafe(self.loop.stop)
@staticmethod
def __run_event_loop(loop: asyncio.AbstractEventLoop) -> None:
try:
loop.run_forever()
finally:
# This is entered when loop.stop is scheduled from the main thread
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
@staticmethod
def __start_new_event_loop() -> asyncio.AbstractEventLoop:
loop = asyncio.new_event_loop()
event_loop = threading.Thread(
target=_EventLoop.__run_event_loop,
daemon=True,
args=(loop,),
name="eventLoop",
)
event_loop.start()
while not loop.is_running():
time.sleep(0.01)
return loop
@staticmethod
def patch_exception_handler(loop: asyncio.AbstractEventLoop) -> None:
"""This patches the asyncio exception handler.
It ignores the `BlockingIOError: [Errno 35] Resource temporarily unavailable` error
that is emitted by `aio.grpc` when multiple event loops are used in separate threads. This error is not actually an implementation/call error,
it's just a problem with grpc's cython implementation of `aio.Channel.__init__` whereby a `socket.recv(1)` call only works on the first call with
all subsequent calls to `aio.Channel.__init__` throwing the above error.
This call within the `aio.Channel.__init__` method does not affect the functionality of the library and can be safely ignored.
Context:
- https://github.com/grpc/grpc/issues/25364
- https://github.com/grpc/grpc/pull/36096
"""
def exception_handler(loop: asyncio.AbstractEventLoop, context: Dict[str, Any]) -> None:
if "exception" in context:
if type(
context["exception"]
).__name__ == "BlockingIOError" and "Resource temporarily unavailable" in str(
context["exception"]
):
return
loop.default_exception_handler(context)
loop.set_exception_handler(exception_handler)
def __del__(self) -> None:
self.shutdown()
|
_EventLoop
|
python
|
python-pillow__Pillow
|
src/PIL/BlpImagePlugin.py
|
{
"start": 7948,
"end": 9344
}
|
class ____(ImageFile.ImageFile):
"""
Blizzard Mipmap Format
"""
format = "BLP"
format_description = "Blizzard Mipmap Format"
def _open(self) -> None:
self.magic = self.fp.read(4)
if not _accept(self.magic):
msg = f"Bad BLP magic {repr(self.magic)}"
raise BLPFormatError(msg)
compression = struct.unpack("<i", self.fp.read(4))[0]
if self.magic == b"BLP1":
alpha = struct.unpack("<I", self.fp.read(4))[0] != 0
else:
encoding = struct.unpack("<b", self.fp.read(1))[0]
alpha = struct.unpack("<b", self.fp.read(1))[0] != 0
alpha_encoding = struct.unpack("<b", self.fp.read(1))[0]
self.fp.seek(1, os.SEEK_CUR) # mips
self._size = struct.unpack("<II", self.fp.read(8))
args: tuple[int, int, bool] | tuple[int, int, bool, int]
if self.magic == b"BLP1":
encoding = struct.unpack("<i", self.fp.read(4))[0]
self.fp.seek(4, os.SEEK_CUR) # subtype
args = (compression, encoding, alpha)
offset = 28
else:
args = (compression, encoding, alpha, alpha_encoding)
offset = 20
decoder = self.magic.decode()
self._mode = "RGBA" if alpha else "RGB"
self.tile = [ImageFile._Tile(decoder, (0, 0) + self.size, offset, args)]
|
BlpImageFile
|
python
|
pallets__jinja
|
src/jinja2/nodes.py
|
{
"start": 26893,
"end": 27638
}
|
class ____(Expr):
"""Compares an expression with some other expressions. `ops` must be a
list of :class:`Operand`\\s.
"""
fields = ("expr", "ops")
expr: Expr
ops: list["Operand"]
def as_const(self, eval_ctx: EvalContext | None = None) -> t.Any:
eval_ctx = get_eval_context(self, eval_ctx)
result = value = self.expr.as_const(eval_ctx)
try:
for op in self.ops:
new_value = op.expr.as_const(eval_ctx)
result = _cmpop_to_func[op.op](value, new_value)
if not result:
return False
value = new_value
except Exception as e:
raise Impossible() from e
return result
|
Compare
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 457890,
"end": 458334
}
|
class ____(sgqlc.types.Interface):
"""Represents a type that can be retrieved by a URL."""
__schema__ = github_schema
__field_names__ = ("resource_path", "url")
resource_path = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="resourcePath")
"""The HTML path to this resource."""
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
"""The URL to this resource."""
|
UniformResourceLocatable
|
python
|
getsentry__sentry
|
src/sentry/api/endpoints/auth_index.py
|
{
"start": 5447,
"end": 14302
}
|
class ____(BaseAuthIndexEndpoint):
publish_status = {
"DELETE": ApiPublishStatus.PRIVATE,
"GET": ApiPublishStatus.PRIVATE,
"PUT": ApiPublishStatus.PRIVATE,
"POST": ApiPublishStatus.PRIVATE,
}
"""
Manage session authentication
Intended to be used by the internal Sentry application to handle
authentication methods from JS endpoints by relying on internal sessions
and simple HTTP authentication.
"""
enforce_rate_limit = True
rate_limits = RateLimitConfig(
limit_overrides={
"PUT": {
RateLimitCategory.USER: RateLimit(
limit=5, window=60 * 60
), # 5 PUT requests per hour per user
}
}
)
def _validate_superuser(
self, validator: AuthVerifyValidator, request: Request, verify_authenticator: bool
) -> bool:
"""
For a superuser, they need to be validated before we can grant an active superuser session.
If the user has a password or u2f device, authenticate the password/challenge that was sent is valid.
If the user doesn't have a password or u2f device, we say they're authenticated if they have a
valid SSO session.
By nature of granting an active superuser session, we want to make sure that the user has completed
SSO and if they do not, we redirect them back to the SSO login.
"""
logger.info(
"auth-index.validate_superuser",
extra={
"user": request.user.id,
"raise_exception": not DISABLE_SSO_CHECK_FOR_LOCAL_DEV,
"verify_authenticator": verify_authenticator,
},
)
# Disable exception for missing password or u2f code if we're running locally
validator.is_valid(raise_exception=not DISABLE_SSO_CHECK_FOR_LOCAL_DEV)
authenticated = (
self._verify_user_via_inputs(validator, request)
if (not DISABLE_SSO_CHECK_FOR_LOCAL_DEV and verify_authenticator) or is_self_hosted()
else True
)
if SUPERUSER_ORG_ID:
if not has_completed_sso(request, SUPERUSER_ORG_ID):
request.session[PREFILLED_SU_MODAL_KEY] = request.data
self._reauthenticate_with_sso(request, SUPERUSER_ORG_ID)
return authenticated
def post(self, request: Request) -> Response:
"""
Authenticate a User
```````````````````
This endpoint authenticates a user using the provided credentials
through a regular HTTP basic auth system. The response contains
cookies that need to be sent with further requests that require
authentication.
This is primarily used internally in Sentry.
Common example::
curl -X ###METHOD### -u username:password ###URL###
"""
if not request.user.is_authenticated:
return Response(status=status.HTTP_400_BAD_REQUEST)
if is_demo_user(request.user):
return Response(status=status.HTTP_403_FORBIDDEN)
# If 2fa login is enabled then we cannot sign in with username and
# password through this api endpoint.
if request.user.has_2fa():
return Response(
{
"2fa_required": True,
"message": "Cannot sign-in with password authentication when 2fa is enabled.",
},
status=status.HTTP_403_FORBIDDEN,
)
try:
# Must use the real request object that Django knows about
auth.login(request._request, promote_request_rpc_user(request))
except auth.AuthUserPasswordExpired:
return Response(
{
"message": "Cannot sign-in with password authentication because password has expired."
},
status=status.HTTP_403_FORBIDDEN,
)
request.user = request._request.user
return self.get(request)
def put(self, request: Request) -> Response:
"""
Verify a User
`````````````
This endpoint verifies the currently authenticated user (for example, to gain superuser)
through 3 methods (password and u2f device (provided in the request data) and valid sso
session if the user is a superuser). If the request is from the superuser modal and the
current superuser is verified, superuser access is granted.
:auth: required
"""
if not request.user.is_authenticated:
return Response(status=status.HTTP_401_UNAUTHORIZED)
validator = AuthVerifyValidator(data=request.data)
if not (request.user.is_superuser and request.data.get("isSuperuserModal")):
try:
validator.is_valid(raise_exception=True)
except ValidationError:
return Response({"detail": {"code": MISSING_PASSWORD_OR_U2F_CODE}}, status=400)
authenticated = self._verify_user_via_inputs(validator, request)
else:
verify_authenticator = False
if not DISABLE_SSO_CHECK_FOR_LOCAL_DEV and not is_self_hosted():
if SUPERUSER_ORG_ID:
verify_authenticator = organization_service.check_organization_by_id(
id=SUPERUSER_ORG_ID, only_visible=False
)
if verify_authenticator:
if not Authenticator.objects.filter(
user_id=request.user.id, type=U2fInterface.type
).exists():
return Response(
{"detail": {"code": "no_u2f"}}, status=status.HTTP_403_FORBIDDEN
)
logger.info(
"auth-index.put",
extra={
"organization": SUPERUSER_ORG_ID,
"user": request.user.id,
"verify_authenticator": verify_authenticator,
},
)
try:
authenticated = self._validate_superuser(validator, request, verify_authenticator)
except ValidationError:
return Response({"detail": {"code": MISSING_PASSWORD_OR_U2F_CODE}}, status=400)
if not authenticated:
return Response({"detail": {"code": "ignore"}}, status=status.HTTP_403_FORBIDDEN)
try:
# Must use the httprequest object instead of request
auth.login(request._request, promote_request_rpc_user(request))
metrics.incr(
"sudo_modal.success",
)
except auth.AuthUserPasswordExpired:
metrics.incr(
"sudo_modal.failure",
)
return Response(
{
"code": "password-expired",
"message": "Cannot sign-in with basic auth because password has expired.",
},
status=status.HTTP_403_FORBIDDEN,
)
if request.user.is_superuser and request.data.get("isSuperuserModal"):
request.superuser.set_logged_in(request.user)
request.user = request._request.user
return self.get(request)
def delete(self, request: Request, *args, **kwargs) -> Response:
"""
Logout the Authenticated User
`````````````````````````````
Deauthenticate all active sessions for this user.
"""
# Allows demo user to log out from its current session but not others
if is_demo_user(request.user) and request.data.get("all", None) is True:
return Response(status=status.HTTP_403_FORBIDDEN)
# If there is an SLO URL, return it to frontend so the browser can redirect
# the user back to the IdP site to delete the IdP session cookie
slo_url = handle_saml_single_logout(request)
# For signals to work here, we must promote the request.user to a full user object
logout(request._request)
request.user = AnonymousUser()
# Force cookies to be deleted
response = Response()
response.delete_cookie(settings.CSRF_COOKIE_NAME, domain=settings.CSRF_COOKIE_DOMAIN)
response.delete_cookie(settings.SESSION_COOKIE_NAME, domain=settings.SESSION_COOKIE_DOMAIN)
if referrer := request.GET.get("referrer"):
analytics.record(
AuthV2DeleteLogin(
event=referrer,
)
)
if slo_url:
response.status_code = status.HTTP_200_OK
response.data = {"sloUrl": slo_url}
else:
response.status_code = status.HTTP_204_NO_CONTENT
return response
|
AuthIndexEndpoint
|
python
|
pdm-project__pdm
|
src/pdm/models/reporter.py
|
{
"start": 458,
"end": 836
}
|
class ____:
def report_download(self, link: Any, completed: int, total: int | None) -> None:
pass
def report_build_start(self, filename: str) -> None:
pass
def report_build_end(self, filename: str) -> None:
pass
def report_unpack(self, filename: Path, completed: int, total: int | None) -> None:
pass
@dataclass
|
CandidateReporter
|
python
|
huggingface__transformers
|
tests/models/esm/test_modeling_esmfold.py
|
{
"start": 6105,
"end": 8341
}
|
class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
test_mismatched_shapes = False
all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
pipeline_model_mapping = {} if is_torch_available() else {}
test_sequence_classification_problem_types = False
def setUp(self):
self.model_tester = EsmFoldModelTester(self)
self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@is_flaky(
description="The computed `s = s / norm_denom` in `EsmFoldAngleResnet` is numerically instable if `norm_denom` is very small."
)
def test_batching_equivalence(self):
super().test_batching_equivalence()
@unittest.skip(reason="Does not support attention outputs")
def test_attention_outputs(self):
pass
@unittest.skip
def test_correct_missing_keys(self):
pass
@unittest.skip(reason="Esm does not support embedding resizing")
def test_resize_embeddings_untied(self):
pass
@unittest.skip(reason="Esm does not support embedding resizing")
def test_resize_tokens_embeddings(self):
pass
@unittest.skip(reason="ESMFold does not support passing input embeds!")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="ESMFold does not output hidden states in the normal way.")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="ESMfold does not output hidden states in the normal way.")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="ESMFold only has one output format.")
def test_model_outputs_equivalence(self):
pass
@unittest.skip(reason="ESMFold does not support input chunking.")
def test_feed_forward_chunking(self):
pass
@unittest.skip(reason="ESMFold doesn't support data parallel.")
def test_multi_gpu_data_parallel_forward(self):
pass
@require_torch
|
EsmFoldModelTest
|
python
|
huggingface__transformers
|
src/transformers/tokenization_utils_base.py
|
{
"start": 45658,
"end": 188986
}
|
class ____(PushToHubMixin):
"""
Base class for all tokenizer backends.
"""
vocab_files_names: dict[str, str] = {}
pretrained_vocab_files_map: dict[str, dict[str, str]] = {}
_auto_class: Optional[str] = None
# first name has to correspond to main model input name
# to make sure `tokenizer.pad(...)` works correctly
model_input_names: list[str] = ["input_ids", "token_type_ids", "attention_mask"]
padding_side: str = "right"
truncation_side: str = "right"
slow_tokenizer_class = None
# Special tokens support (moved from SpecialTokensMixin)
# V5: Clean separation of named special tokens from extra special tokens
SPECIAL_TOKENS_ATTRIBUTES = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
def __init__(self, **kwargs):
self.init_inputs = ()
for key in kwargs:
if hasattr(self, key) and callable(getattr(self, key)):
raise AttributeError(f"{key} conflicts with the method {key} in {self.__class__.__name__}")
self.init_kwargs = copy.deepcopy(kwargs)
self.name_or_path = kwargs.pop("name_or_path", "")
self._processor_class = kwargs.pop("processor_class", None)
# Store additional_special_tokens in init_kwargs before conversion for backward compatibility
additional_special_tokens_value = kwargs.pop("additional_special_tokens", None)
if "additional_special_tokens" not in self.init_kwargs:
self.init_kwargs["additional_special_tokens"] = additional_special_tokens_value
kwargs.setdefault("extra_special_tokens", additional_special_tokens_value)
self._pad_token_type_id = 0
self.verbose = kwargs.pop("verbose", False)
# V5: Separate storage for named special tokens and extra special tokens
self._special_tokens_map = dict.fromkeys(self.SPECIAL_TOKENS_ATTRIBUTES)
self._extra_special_tokens = [] # List of extra model-specific special tokens
# V5: track both explicit and auto-detected model-specific tokens
explicit_model_specific_tokens = kwargs.pop("model_specific_special_tokens", None)
if explicit_model_specific_tokens is None:
explicit_model_specific_tokens = {}
elif not isinstance(explicit_model_specific_tokens, dict):
raise TypeError("model_specific_special_tokens must be a dictionary of token name to token value")
auto_model_specific_tokens = {}
# Directly set hidden values to allow init with tokens not yet in vocab
for key in list(kwargs.keys()):
if key in self.SPECIAL_TOKENS_ATTRIBUTES:
value = kwargs.pop(key)
if value is None:
continue
if isinstance(value, (str, AddedToken)):
self._special_tokens_map[key] = value
else:
raise TypeError(f"Special token {key} has to be either str or AddedToken but got: {type(value)}")
elif key == "extra_special_tokens":
# V5: Support extra_special_tokens in __init__
value = kwargs.pop(key)
if value is None:
continue
# If dict: treat as model specific named special tokens (attributes)
if isinstance(value, dict):
self._set_model_specific_special_tokens(special_tokens=value)
else:
if not isinstance(value, (list, tuple)) or not all(
isinstance(t, (str, AddedToken)) for t in value
):
raise TypeError(
"extra_special_tokens must be a list/tuple of str or AddedToken, or a dict mapping names to tokens"
)
self._extra_special_tokens = list(value)
elif (
key.endswith("_token")
and key not in self.SPECIAL_TOKENS_ATTRIBUTES
and isinstance(kwargs[key], (str, AddedToken))
):
value = kwargs.pop(key)
if value is None:
continue
auto_model_specific_tokens[key] = value
# For backward compatibility we fallback to set model_max_length from max_len if provided
model_max_length = kwargs.pop("model_max_length", kwargs.pop("max_len", None))
self.model_max_length = model_max_length if model_max_length is not None else VERY_LARGE_INTEGER
self.padding_side = kwargs.pop("padding_side", self.padding_side)
if self.padding_side not in ["right", "left"]:
raise ValueError(
f"Padding side should be selected between 'right' and 'left', current value: {self.padding_side}"
)
self.truncation_side = kwargs.pop("truncation_side", self.truncation_side)
if self.truncation_side not in ["right", "left"]:
raise ValueError(
f"Truncation side should be selected between 'right' and 'left', current value: {self.truncation_side}"
)
self.model_input_names = kwargs.pop("model_input_names", self.model_input_names)
# By default, clean up tokenization spaces for both fast and slow tokenizers
self.clean_up_tokenization_spaces = kwargs.pop("clean_up_tokenization_spaces", False)
# By default, do not split special tokens for both fast and slow tokenizers
self.split_special_tokens = kwargs.pop("split_special_tokens", False)
self._in_target_context_manager = False
self.chat_template = kwargs.pop("chat_template", None)
if isinstance(self.chat_template, (list, tuple)):
# Chat templates are stored as lists of dicts with fixed key names,
# we reconstruct that into a single dict while loading them.
self.chat_template = {template["name"]: template["template"] for template in self.chat_template}
model_specific_tokens = {**auto_model_specific_tokens, **explicit_model_specific_tokens}
if model_specific_tokens:
self._set_model_specific_special_tokens(special_tokens=model_specific_tokens)
self.deprecation_warnings = {}
# Backend information (V5: tracking which backend and files were used)
self.backend = kwargs.pop("backend", None)
self.files_loaded = kwargs.pop("files_loaded", [])
def _set_processor_class(self, processor_class: str):
"""Sets processor class so it can be serialized in `tokenizer_config.json`."""
self._processor_class = processor_class
# ---- Special tokens API (moved from SpecialTokensMixin) ----
def add_special_tokens(
self,
special_tokens_dict: dict[str, Union[str, AddedToken, Sequence[Union[str, AddedToken]]]],
replace_extra_special_tokens=True,
) -> int:
"""
Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If
special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the
current vocabulary).
When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the
model so that its embedding matrix matches the tokenizer.
In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method.
Using `add_special_tokens` will ensure your special tokens can be used in several ways:
- Special tokens can be skipped when decoding using `skip_special_tokens = True`.
- Special tokens are carefully handled by the tokenizer (they are never split), similar to `AddedTokens`.
- You can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This
makes it easy to develop model-agnostic training and fine-tuning scripts.
When possible, special tokens are already registered for provided pretrained models (for instance
[`BertTokenizer`] `cls_token` is already registered to be `'[CLS]'` and XLM's one is also registered to be
`'</s>'`).
Args:
special_tokens_dict (dictionary *str* to *str*, `tokenizers.AddedToken`, or `Sequence[Union[str, AddedToken]]`):
Keys should be in the list of predefined special attributes: [`bos_token`, `eos_token`, `unk_token`,
`sep_token`, `pad_token`, `cls_token`, `mask_token`, `extra_special_tokens`].
Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer
assign the index of the `unk_token` to them).
replace_extra_special_tokens (`bool`, *optional*, defaults to `True`):
If `True`, the existing list of extra special tokens will be replaced by the list provided in
`special_tokens_dict`. Otherwise, `extra_special_tokens` will be extended. In the former
case, the tokens will NOT be removed from the tokenizer's full vocabulary - they are only being flagged
as non-special tokens. Remember, this only affects which tokens are skipped during decoding, not the
`added_tokens_encoder` and `added_tokens_decoder`. This means that the previous
`extra_special_tokens` are still added tokens, and will not be split by the model.
Returns:
`int`: Number of tokens added to the vocabulary.
Examples:
```python
# Let's see how to add a new classification token to GPT-2
tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
model = GPT2Model.from_pretrained("openai-community/gpt2")
special_tokens_dict = {"cls_token": "<CLS>"}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
print("We have added", num_added_toks, "tokens")
# Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
model.resize_token_embeddings(len(tokenizer))
assert tokenizer.cls_token == "<CLS>"
```"""
if not special_tokens_dict:
return 0
# V5: Allowed keys are SPECIAL_TOKENS_ATTRIBUTES + "extra_special_tokens"
# Backward compatibility: convert "additional_special_tokens" to "extra_special_tokens"
special_tokens_dict = dict(special_tokens_dict)
if "additional_special_tokens" in special_tokens_dict and "extra_special_tokens" not in special_tokens_dict:
special_tokens_dict["extra_special_tokens"] = special_tokens_dict.pop("additional_special_tokens")
allowed_keys = set(self.SPECIAL_TOKENS_ATTRIBUTES) | {"extra_special_tokens"}
tokens_to_add = []
for key, value in special_tokens_dict.items():
if key not in allowed_keys:
raise ValueError(f"Key {key} is not a valid special token. Valid keys are: {allowed_keys}")
if self.verbose:
logger.info(f"Assigning {value} to the {key} key of the tokenizer")
if key == "extra_special_tokens":
if not isinstance(value, (list, tuple)) or not all(isinstance(t, (str, AddedToken)) for t in value):
raise ValueError(f"Tokens {value} for key {key} should all be str or AddedToken instances")
new_tokens = [
(
AddedToken(t, rstrip=False, lstrip=False, normalized=False, special=True)
if isinstance(t, str)
else t
)
for t in value
if replace_extra_special_tokens or str(t) not in self.extra_special_tokens
]
if replace_extra_special_tokens and new_tokens:
self._extra_special_tokens = list(new_tokens)
else:
self._extra_special_tokens.extend(new_tokens)
tokens_to_add.extend(new_tokens)
else:
if not isinstance(value, (str, AddedToken)):
raise ValueError(f"Token {value} for key {key} should be a str or an AddedToken instance")
if isinstance(value, str):
value = AddedToken(value, rstrip=False, lstrip=False, normalized=False, special=True)
setattr(self, key, value)
tokens_to_add.append(value)
return self.add_tokens(tokens_to_add, special_tokens=True)
def add_tokens(
    self, new_tokens: Union[str, AddedToken, Sequence[Union[str, AddedToken]]], special_tokens: bool = False
) -> int:
    """
    #TODO remove this from here! PreTrainedTokenizerBase should be agnostic of AddedToken.
    Add new tokens to the vocabulary. Tokens already present in the vocabulary are skipped, so
    added tokens and tokens native to the tokenization algorithm are not treated identically.

    Args:
        new_tokens (`str`, `tokenizers.AddedToken` or a sequence of *str* or `tokenizers.AddedToken`):
            Token(s) to add. A `tokenizers.AddedToken` wraps a string and lets you customize its
            matching behavior (single-word matching, left/right whitespace stripping, etc.).
        special_tokens (`bool`, *optional*, defaults to `False`):
            Whether the token(s) are special. This mostly changes the normalization behavior
            (see details for `tokenizers.AddedToken` in the HuggingFace tokenizers library).

    Returns:
        `int`: Number of tokens actually added to the vocabulary.

    Examples:

    ```python
    # Let's see how to increase the vocabulary of Bert model and tokenizer
    tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased")
    model = BertModel.from_pretrained("google-bert/bert-base-uncased")

    num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])
    print("We have added", num_added_toks, "tokens")
    # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
    model.resize_token_embeddings(len(tokenizer))
    ```"""
    # Nothing to do for None / empty string / empty sequence.
    if not new_tokens:
        return 0
    # Normalize a single token into a one-element list; sequences pass through untouched.
    token_batch = new_tokens if isinstance(new_tokens, (list, tuple)) else [new_tokens]
    return self._add_tokens(token_batch, special_tokens=special_tokens)
def _add_tokens(self, new_tokens: Union[list[str], list[AddedToken]], special_tokens: bool = False) -> int:
    """Backend hook performing the actual vocabulary insertion; concrete subclasses implement it
    and return the number of tokens added."""
    raise NotImplementedError
@property
def pad_token_type_id(self) -> int:
    """`int`: The token type id used for padding, as stored in `_pad_token_type_id`."""
    return self._pad_token_type_id
def __setattr__(self, key, value):
    """Intercept assignment of named special tokens and `extra_special_tokens`.

    Setting e.g. `bos_token` stores the value in `_special_tokens_map`; setting
    `bos_token_id` first converts the id back to its token string. Assigning to
    `extra_special_tokens` accepts `None` (clears), a dict (routed to
    `_set_model_specific_special_tokens`), or a list/tuple (stored directly).
    Everything else falls through to normal attribute assignment.
    """
    key_without_id = key
    key_is_special_id = key.endswith("_id") or key.endswith("_ids")
    if key_is_special_id:
        # Strip the "_id" / "_ids" suffix to recover the token attribute name.
        key_without_id = key[:-3] if not key.endswith("_ids") else key[:-4]

    # Check if this is a named special token
    # The `__dict__.get` guard avoids triggering this path before
    # `_special_tokens_map` exists (e.g. during __init__ or unpickling).
    if (
        self.__dict__.get("_special_tokens_map", None) is not None
        and key_without_id in self.SPECIAL_TOKENS_ATTRIBUTES
    ):
        if key_is_special_id:
            if value is not None:
                # Special tokens are stored as strings; map the id back to its token.
                value = self.convert_ids_to_tokens(value)
            key = key_without_id

        if not isinstance(value, (str, AddedToken)) and value is not None:
            raise ValueError(f"Cannot set a non-string value as the {key}")
        self._special_tokens_map[key] = value
    # Check if this is extra_special_tokens or extra_special_tokens_ids
    elif self.__dict__.get("_extra_special_tokens", None) is not None and key_without_id == "extra_special_tokens":
        if key_is_special_id:
            if value is not None:
                # Convert each id to its token string before storing.
                value = [self.convert_ids_to_tokens(val) for val in value]
            key = key_without_id

        if key == "extra_special_tokens":
            if value is None:
                self._extra_special_tokens = []
            elif isinstance(value, dict):
                # Dict is treated as model-specific special tokens (such as multimodal tokens)
                self._set_model_specific_special_tokens(special_tokens=value)
            elif isinstance(value, (list, tuple)):
                self._extra_special_tokens = list(value)
            else:
                raise ValueError(f"extra_special_tokens must be a list, tuple, or dict, got {type(value)}")
    else:
        super().__setattr__(key, value)
def __getattr__(self, key):
    """Resolve named special tokens (`bos_token`, ...), their id forms (`bos_token_id`),
    and `extra_special_tokens` / `extra_special_tokens_ids` dynamically.

    Token attributes return strings (or `None` with an error log when unset);
    `*_id` / `*_ids` forms convert through the vocabulary. Only called when normal
    attribute lookup fails, so the `__dict__` guards below prevent recursion
    before `_special_tokens_map` / `_extra_special_tokens` are initialized.
    """
    key_without_id = key
    key_is_special_id = key.endswith("_id") or key.endswith("_ids")
    if key_is_special_id:
        # Strip the "_id" / "_ids" suffix to recover the token attribute name.
        key_without_id = key[:-3] if not key.endswith("_ids") else key[:-4]

    # Check if this is a named special token
    if (
        self.__dict__.get("_special_tokens_map", None) is not None
        and key_without_id in self.SPECIAL_TOKENS_ATTRIBUTES
    ):
        _special_tokens_map = self.__dict__["_special_tokens_map"]
        if not key_is_special_id:
            if _special_tokens_map[key_without_id] is None:
                # Unset token: log (not raise) and return None so callers can test for it.
                if self.verbose:
                    logger.error(f"Using {key}, but it is not set yet.")
                return None
            value = _special_tokens_map[key_without_id]
            return str(value)
        else:
            # Id form: fetch the token string first, then map through the vocab.
            attr_as_tokens = getattr(self, key_without_id)
            return self.convert_tokens_to_ids(attr_as_tokens) if attr_as_tokens is not None else None

    # Check if this is extra_special_tokens or extra_special_tokens_ids
    elif key_without_id == "extra_special_tokens":
        if self.__dict__.get("_extra_special_tokens", None) is not None:
            if not key_is_special_id:
                return [str(tok) for tok in self.__dict__["_extra_special_tokens"]]
            else:
                # extra_special_tokens_ids
                tokens = self.__dict__["_extra_special_tokens"]
                return self.convert_tokens_to_ids([str(tok) for tok in tokens]) if tokens else []

    if key not in self.__dict__:
        raise AttributeError(f"{self.__class__.__name__} has no attribute {key}")
    else:
        # NOTE(review): reached only when __getattribute__ failed yet the key is in
        # __dict__; delegates to a base-class __getattr__ if one exists — confirm intent.
        return super().__getattr__(key)
def get_special_tokens_mask(
    self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None, already_has_special_tokens: bool = False
) -> list[int]:
    """
    Retrieve sequence ids from a token list that has no special tokens added.

    For fast tokenizers, data collators call this with `already_has_special_tokens=True` to build a mask over an
    already-formatted sequence. In that case, the mask is computed by membership in `all_special_ids`.

    Args:
        token_ids_0: List of IDs for the (possibly already formatted) sequence.
        token_ids_1: Unused when `already_has_special_tokens=True`. Must be None in that case.
        already_has_special_tokens: Whether the sequence is already formatted with special tokens.

    Returns:
        A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.

    Raises:
        ValueError: If `token_ids_1` is given alongside `already_has_special_tokens=True`.
        NotImplementedError: For non-formatted sequences; subclasses provide the formatting rules.
    """
    # Guard clause: the base class cannot format sequences itself.
    if not already_has_special_tokens:
        raise NotImplementedError(
            f"{self.__class__.__name__} does not implement get_special_tokens_mask for non-formatted sequences"
        )
    if token_ids_1 is not None:
        raise ValueError(
            "You should not supply a second sequence if the provided sequence of ids is already formatted "
            "with special tokens for the model."
        )
    # Set membership gives O(1) per-token lookup.
    special_ids = set(self.all_special_ids)
    return [int(int(token_id) in special_ids) for token_id in token_ids_0]
@property
def special_tokens_map(self) -> dict[str, str]:
    """
    `dict[str, str]`: A flat dictionary mapping named special token attributes to their string values.

    Only includes the standard named special tokens (bos_token, eos_token, etc.), not extra_special_tokens,
    and skips attributes whose value is unset (`None`).

    Returns:
        A dictionary with keys like 'bos_token', 'eos_token', etc., and string values.

    **V5 Change**: This now returns only named tokens. Use `extra_special_tokens` for the additional tokens.
    """
    mapping = {}
    for attr_name in self.SPECIAL_TOKENS_ATTRIBUTES:
        token = self._special_tokens_map.get(attr_name)
        if token is not None:
            # AddedToken instances are normalized to plain strings.
            mapping[attr_name] = str(token)
    return mapping
# Note: extra_special_tokens and extra_special_tokens_ids are handled by __getattr__ and __setattr__
# We don't define them as @property to keep the implementation simpler
@property
def all_special_tokens(self) -> list[str]:
    """
    `list[str]`: A list of all unique special tokens (named + extra) as strings.

    Includes both named special tokens (bos_token, eos_token, etc.) and extra special tokens,
    in that order, with duplicates removed (first occurrence wins).
    Converts tokens of `tokenizers.AddedToken` type to string.
    """
    # Named tokens first (skipping unset ones), then the extra tokens.
    named = (self._special_tokens_map.get(attr) for attr in self.SPECIAL_TOKENS_ATTRIBUTES)
    candidates = [str(token) for token in named if token is not None]
    candidates.extend(str(token) for token in self._extra_special_tokens)
    # dict.fromkeys deduplicates while preserving first-seen order.
    return list(dict.fromkeys(candidates))
@property
def all_special_ids(self) -> list[int]:
    """
    `list[int]`: List the ids of the special tokens(`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes.
    """
    # Derived from `all_special_tokens` (named + extra) via the vocabulary.
    return self.convert_tokens_to_ids(self.all_special_tokens)
def _set_model_specific_special_tokens(self, special_tokens: dict[str, Union[str, AddedToken]]):
    """
    Adds new model-specific special tokens (e.g., for multimodal models).

    These tokens are added to the named special tokens map and will be saved in tokenizer config.
    For example: if the model tokenizer is multimodal, we can support special image or audio tokens.

    Args:
        special_tokens: Dictionary of {token_name: token_value}

    Raises:
        TypeError: If any value is neither `str` nor `AddedToken`. No state is
            modified in that case.
    """
    # Validate everything up-front so a bad entry cannot leave
    # SPECIAL_TOKENS_ATTRIBUTES extended with keys that were never stored.
    for key, value in special_tokens.items():
        # Check `str` first so plain strings never need the AddedToken name.
        if not (isinstance(value, str) or isinstance(value, AddedToken)):
            raise TypeError(f"Special token {key} has to be either str or AddedToken but got: {type(value)}")
    # Extend the attribute list, skipping keys already present so repeated
    # calls do not accumulate duplicates.
    existing = set(self.SPECIAL_TOKENS_ATTRIBUTES)
    new_keys = [key for key in special_tokens if key not in existing]
    self.SPECIAL_TOKENS_ATTRIBUTES = list(self.SPECIAL_TOKENS_ATTRIBUTES) + new_keys
    for key, value in special_tokens.items():
        self._special_tokens_map[key] = value
@property
def added_tokens_decoder(self) -> dict[int, AddedToken]:
    """`dict[int, AddedToken]`: Mapping from added-token id to its `AddedToken`; implemented by subclasses."""
    raise NotImplementedError()
def __repr__(self) -> str:
    """Debug representation: class name, key configuration fields, the named special
    tokens, and a multi-line dump of the added-tokens decoder."""
    decoder_entries = "\n\t".join(f"{token_id}: {token!r}," for token_id, token in self.added_tokens_decoder.items())
    header = (
        f"{self.__class__.__name__}(name_or_path='{self.name_or_path}',"
        f" vocab_size={self.vocab_size}, model_max_length={self.model_max_length},"
        f" padding_side='{self.padding_side}', truncation_side='{self.truncation_side}',"
        f" special_tokens={self.special_tokens_map},"
    )
    return header + " added_tokens_decoder={\n\t" + decoder_entries + "\n}\n)"
def __len__(self) -> int:
    """Size of the tokenizer's vocabulary; implemented by concrete subclasses."""
    raise NotImplementedError()
@property
def vocab_size(self) -> int:
    """
    `int`: Size of the base vocabulary (without the added tokens).
    """
    # Base-class stub; concrete tokenizers implement this.
    raise NotImplementedError()
def get_vocab(self) -> dict[str, int]:
    """
    Returns the vocabulary as a dictionary of token to index.

    `tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the
    vocab.

    Returns:
        `dict[str, int]`: The vocabulary.
    """
    # Base-class stub; concrete tokenizers implement this.
    raise NotImplementedError()
def convert_tokens_to_ids(self, tokens: Union[str, list[str]]) -> Union[int, list[int]]:
    """
    Converts a token string (or a sequence of tokens) in a single integer id (or a sequence of ids), using the
    vocabulary.

    Args:
        tokens (`str` or `list[str]`): One or several token(s) to convert to token id(s).

    Returns:
        `int` or `list[int]`: The token id or list of token ids.
    """
    lookup = self._convert_token_to_id_with_added_voc
    # A bare string maps to a single id; sequences map element-wise.
    if isinstance(tokens, str):
        return lookup(tokens)
    return [lookup(token) for token in tokens]
def convert_ids_to_tokens(
    self, ids: Union[int, list[int]], skip_special_tokens: bool = False
) -> Union[str, list[str]]:
    """
    Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and
    added tokens.

    Args:
        ids (`int` or `list[int]`):
            The token id (or token ids) to convert to tokens.
        skip_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not to remove special tokens in the decoding.

    Returns:
        `str` or `list[str]`: The decoded token(s).
    """
    # Base-class stub; concrete tokenizers implement the id -> token mapping.
    raise NotImplementedError()
@classmethod
def from_pretrained(
    cls,
    pretrained_model_name_or_path: Union[str, os.PathLike],
    *init_inputs,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    local_files_only: bool = False,
    token: Optional[Union[str, bool]] = None,
    revision: str = "main",
    trust_remote_code=False,
    **kwargs,
):
    r"""
    Instantiate a [`~tokenization_utils_base.PreTrainedTokenizerBase`] (or a derived class) from a predefined
    tokenizer.

    Args:
        pretrained_model_name_or_path (`str` or `os.PathLike`):
            Can be either:

            - A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co.
            - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved
              using the [`~tokenization_utils_base.PreTrainedTokenizerBase.save_pretrained`] method, e.g.,
              `./my_model_directory/`.
            - (**Deprecated**, not applicable to all derived classes) A path or url to a single saved vocabulary
              file (if and only if the tokenizer only requires a single vocabulary file like Bert or XLNet), e.g.,
              `./my_model_directory/vocab.txt`.
        cache_dir (`str` or `os.PathLike`, *optional*):
            Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the
            standard cache should not be used.
        force_download (`bool`, *optional*, defaults to `False`):
            Whether or not to force the (re-)download the vocabulary files and override the cached versions if they
            exist.
        proxies (`dict[str, str]`, *optional*):
            A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
            'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
        token (`str` or *bool*, *optional*):
            The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
            when running `hf auth login` (stored in `~/.huggingface`).
        local_files_only (`bool`, *optional*, defaults to `False`):
            Whether or not to only rely on local files and not to attempt to download any files.
        revision (`str`, *optional*, defaults to `"main"`):
            The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
            git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
            identifier allowed by git.
        subfolder (`str`, *optional*):
            In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for
            facebook/rag-token-base), specify it here.
        inputs (additional positional arguments, *optional*):
            Will be passed along to the Tokenizer `__init__` method.
        trust_remote_code (`bool`, *optional*, defaults to `False`):
            Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
            should only be set to `True` for repositories you trust and in which you have read the code, as it will
            execute code present on the Hub on your local machine.
        kwargs (additional keyword arguments, *optional*):
            Will be passed to the Tokenizer `__init__` method. Can be used to set special tokens like `bos_token`,
            `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`,
            `extra_special_tokens`. See parameters in the `__init__` for more details.

    <Tip>

    Passing `token=True` is required when you want to use a private model.

    </Tip>

    Examples:

    ```python
    # We can't instantiate directly the base class *PreTrainedTokenizerBase* so let's show our examples on a derived class: BertTokenizer
    # Download vocabulary from huggingface.co and cache.
    tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")

    # Download vocabulary from huggingface.co (user-uploaded) and cache.
    tokenizer = BertTokenizer.from_pretrained("dbmdz/bert-base-german-cased")

    # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*)
    tokenizer = BertTokenizer.from_pretrained("./test/saved_model/")

    # If the tokenizer uses a single vocabulary file, you can point directly to this file
    tokenizer = BertTokenizer.from_pretrained("./test/saved_model/my_vocab.txt")

    # You can link tokens to special vocabulary when instantiating
    tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased", unk_token="<unk>")
    # You should be sure '<unk>' is in the vocabulary when doing that.
    # Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead)
    assert tokenizer.unk_token == "<unk>"
    ```"""
    # Pop internal/bookkeeping kwargs so they are not forwarded to __init__.
    proxies = kwargs.pop("proxies", None)
    subfolder = kwargs.pop("subfolder", None)
    from_pipeline = kwargs.pop("_from_pipeline", None)
    from_auto_class = kwargs.pop("_from_auto", False)
    commit_hash = kwargs.pop("_commit_hash", None)
    gguf_file = kwargs.get("gguf_file")

    # Telemetry metadata attached to hub download requests.
    user_agent = {"file_type": "tokenizer", "from_auto_class": from_auto_class}
    if from_pipeline is not None:
        user_agent["using_pipeline"] = from_pipeline

    if is_offline_mode() and not local_files_only:
        logger.info("Offline mode: forcing local_files_only=True")
        local_files_only = True

    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    vocab_files = {}
    init_configuration = {}

    is_local = os.path.isdir(pretrained_model_name_or_path)
    single_file_id = None
    if os.path.isfile(pretrained_model_name_or_path):
        # For legacy support: allow single-file loading if:
        # 1. Only one vocab file is required, OR
        # 2. It's a fast tokenizer with tokenizer_file (which is optional), OR
        # 3. It's a GGUF file
        vocab_files_count = len(cls.vocab_files_names)
        has_optional_tokenizer_file = vocab_files_count > 1 and "tokenizer_file" in cls.vocab_files_names

        if vocab_files_count > 1 and not gguf_file and not has_optional_tokenizer_file:
            raise ValueError(
                f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not "
                "supported for this tokenizer. Use a model identifier or the path to a directory instead."
            )

        # Use first vocab file that's not tokenizer_file
        file_id = list(cls.vocab_files_names.keys())[0]
        if file_id == "tokenizer_file" and vocab_files_count > 1:
            file_id = [k for k in cls.vocab_files_names.keys() if k != "tokenizer_file"][0]

        vocab_files[file_id] = pretrained_model_name_or_path
        single_file_id = file_id
    else:
        if gguf_file:
            vocab_files["vocab_file"] = gguf_file
        else:
            # At this point pretrained_model_name_or_path is either a directory or a model identifier name
            additional_files_names = {
                "added_tokens_file": ADDED_TOKENS_FILE,  # kept only for legacy
                "special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE,  # kept only for legacy
                "tokenizer_config_file": TOKENIZER_CONFIG_FILE,
                # tokenizer_file used to initialize a slow from a fast. Properly copy the `addedTokens` instead of adding in random orders
                "tokenizer_file": FULL_TOKENIZER_FILE,
                "chat_template_file": CHAT_TEMPLATE_FILE,
            }

            vocab_files = {**cls.vocab_files_names, **additional_files_names}
            if "tokenizer_file" in vocab_files:
                # Try to get the tokenizer config to see if there are versioned tokenizer files.
                fast_tokenizer_file = FULL_TOKENIZER_FILE
                try:
                    resolved_config_file = cached_file(
                        pretrained_model_name_or_path,
                        TOKENIZER_CONFIG_FILE,
                        cache_dir=cache_dir,
                        force_download=force_download,
                        proxies=proxies,
                        token=token,
                        revision=revision,
                        local_files_only=local_files_only,
                        subfolder=subfolder,
                        user_agent=user_agent,
                        _raise_exceptions_for_missing_entries=False,
                        _commit_hash=commit_hash,
                    )
                except OSError:
                    # Re-raise any error raised by cached_file in order to get a helpful error message
                    raise
                except Exception:
                    # For any other exception, we throw a generic error.
                    raise OSError(
                        f"Can't load tokenizer for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                        "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                        f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                        f"containing all relevant files for a {cls.__name__} tokenizer."
                    )

                commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
                if resolved_config_file is not None:
                    with open(resolved_config_file, encoding="utf-8") as reader:
                        tokenizer_config = json.load(reader)
                        if "fast_tokenizer_files" in tokenizer_config:
                            fast_tokenizer_file = get_fast_tokenizer_file(tokenizer_config["fast_tokenizer_files"])
                vocab_files["tokenizer_file"] = fast_tokenizer_file

            # This block looks for any extra chat template files
            if is_local:
                template_dir = Path(pretrained_model_name_or_path, CHAT_TEMPLATE_DIR)
                if template_dir.is_dir():
                    for template_file in template_dir.glob("*.jinja"):
                        template_name = template_file.name.removesuffix(".jinja")
                        vocab_files[f"chat_template_{template_name}"] = f"{CHAT_TEMPLATE_DIR}/{template_file.name}"
            else:
                for template in list_repo_templates(
                    pretrained_model_name_or_path,
                    local_files_only=local_files_only,
                    revision=revision,
                    cache_dir=cache_dir,
                    token=token,
                ):
                    template = template.removesuffix(".jinja")
                    vocab_files[f"chat_template_{template}"] = f"{CHAT_TEMPLATE_DIR}/{template}.jinja"

    # Collect the repo's (or local directory's) file listing so we can fall back
    # to alternative tokenizer file names (e.g. mistral's) when needed.
    remote_files = []
    if not is_local and not local_files_only:
        try:
            remote_files = list_repo_files(pretrained_model_name_or_path)
        except Exception:
            remote_files = []
    elif pretrained_model_name_or_path and os.path.isdir(pretrained_model_name_or_path):
        remote_files = os.listdir(pretrained_model_name_or_path)

    # NOTE(review): the expected tokenizer filename is used here as a *regex pattern*
    # against the joined file list ("." matches any character) — confirm intended.
    if "tokenizer_file" in vocab_files and not re.search(vocab_files["tokenizer_file"], "".join(remote_files)):
        # mistral tokenizer names are different, but we can still convert them if
        # mistral common is not there
        other_pattern = r"tekken\.json|tokenizer\.model\.*"
        if match := re.search(other_pattern, "\n".join(remote_files)):
            vocab_files["vocab_file"] = match.group()

    # Resolve each candidate file: the legacy single-file case is used verbatim,
    # everything else goes through the hub cache (missing entries resolve to None).
    resolved_vocab_files = {}
    for file_id, file_path in vocab_files.items():
        if file_path is None:
            resolved_vocab_files[file_id] = None
        elif single_file_id == file_id:
            if os.path.isfile(file_path):
                resolved_vocab_files[file_id] = file_path
        else:
            try:
                resolved_vocab_files[file_id] = cached_file(
                    pretrained_model_name_or_path,
                    file_path,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    token=token,
                    user_agent=user_agent,
                    revision=revision,
                    subfolder=subfolder,
                    _raise_exceptions_for_missing_entries=False,
                    _commit_hash=commit_hash,
                )
            except OSError:
                # Re-raise any error raised by cached_file in order to get a helpful error message
                raise
            except Exception:
                # For any other exception, we throw a generic error.
                raise OSError(
                    f"Can't load tokenizer for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                    "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                    f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                    f"containing all relevant files for a {cls.__name__} tokenizer."
                )
            commit_hash = extract_commit_hash(resolved_vocab_files[file_id], commit_hash)

    # Log where each file was actually loaded from (local path vs. cache).
    for file_id, file_path in vocab_files.items():
        if file_id not in resolved_vocab_files:
            continue
        if is_local:
            logger.info(f"loading file {file_path}")
        else:
            logger.info(f"loading file {file_path} from cache at {resolved_vocab_files[file_id]}")

    return cls._from_pretrained(
        resolved_vocab_files,
        pretrained_model_name_or_path,
        init_configuration,
        *init_inputs,
        token=token,
        cache_dir=cache_dir,
        local_files_only=local_files_only,
        _commit_hash=commit_hash,
        _is_local=is_local,
        trust_remote_code=trust_remote_code,
        **kwargs,
    )
@classmethod
def _from_pretrained(
cls,
resolved_vocab_files,
pretrained_model_name_or_path,
init_configuration,
*init_inputs,
token=None,
cache_dir=None,
local_files_only=False,
_commit_hash=None,
_is_local=False,
trust_remote_code=False,
**kwargs,
):
# We instantiate fast tokenizers based on a slow tokenizer if we don't have access to the tokenizer.json
# file or if `from_slow` is set to True.
from_slow = kwargs.get("from_slow", False)
gguf_file = kwargs.get("gguf_file")
has_tokenizer_file = resolved_vocab_files.get("tokenizer_file", None) is not None
# If one passes a GGUF file path to `gguf_file` there is no need for this check as the tokenizer will be
# loaded directly from the GGUF file.
if (from_slow or not has_tokenizer_file) and cls.slow_tokenizer_class is not None and not gguf_file:
slow_tokenizer = (cls.slow_tokenizer_class)._from_pretrained(
copy.deepcopy(resolved_vocab_files),
pretrained_model_name_or_path,
copy.deepcopy(init_configuration),
*init_inputs,
token=token,
cache_dir=cache_dir,
local_files_only=local_files_only,
_commit_hash=_commit_hash,
**(copy.deepcopy(kwargs)),
)
else:
slow_tokenizer = None
# Prepare tokenizer initialization kwargs
# Did we saved some inputs and kwargs to reload ?
tokenizer_config_file = resolved_vocab_files.pop("tokenizer_config_file", None)
if tokenizer_config_file is not None:
with open(tokenizer_config_file, encoding="utf-8") as tokenizer_config_handle:
init_kwargs = json.load(tokenizer_config_handle)
# First attempt. We get tokenizer_class from tokenizer_config to check mismatch between tokenizers.
config_tokenizer_class = init_kwargs.get("tokenizer_class")
init_kwargs.pop("tokenizer_class", None)
if not has_tokenizer_file:
init_kwargs.get("tokenizer_file", None)
saved_init_inputs = init_kwargs.pop("init_inputs", ())
if not init_inputs:
init_inputs = saved_init_inputs
else:
config_tokenizer_class = None
init_kwargs = init_configuration
# If independent chat template file(s) exist, they take priority over template entries in the tokenizer config
chat_templates = {}
chat_template_file = resolved_vocab_files.pop("chat_template_file", None)
extra_chat_templates = [key for key in resolved_vocab_files if key.startswith("chat_template_")]
if chat_template_file is not None:
with open(chat_template_file, encoding="utf-8") as chat_template_handle:
chat_templates["default"] = chat_template_handle.read()
for extra_chat_template in extra_chat_templates:
template_file = resolved_vocab_files.pop(extra_chat_template, None)
if template_file is None:
continue # I think this should never happen, but just in case
template_name = extra_chat_template.removeprefix("chat_template_")
with open(template_file) as chat_template_handle:
chat_templates[template_name] = chat_template_handle.read()
if len(chat_templates) == 1 and "default" in chat_templates:
init_kwargs["chat_template"] = chat_templates["default"]
elif chat_templates:
init_kwargs["chat_template"] = chat_templates
if not _is_local:
if "auto_map" in init_kwargs:
# For backward compatibility with the old format.
if isinstance(init_kwargs["auto_map"], (tuple, list)):
init_kwargs["auto_map"] = {"AutoTokenizer": init_kwargs["auto_map"]}
if config_tokenizer_class is None:
# Matt: This entire block is only used to decide if the tokenizer class matches the class in the repo.
# If not, it raises a warning, but otherwise continues. Since we mostly load tokenizers with
# AutoTokenizer these days, it seems like a lot of work (and a source of bugs) for little gain.
# Maybe we can just remove this entirely?
from .models.auto.configuration_auto import AutoConfig # tests_ignore
# Second attempt. If we have not yet found tokenizer_class, let's try to use the config.
try:
config = AutoConfig.from_pretrained(
pretrained_model_name_or_path,
token=token,
cache_dir=cache_dir,
local_files_only=local_files_only,
trust_remote_code=trust_remote_code,
_commit_hash=_commit_hash,
)
config_tokenizer_class = config.tokenizer_class
except (OSError, ValueError, KeyError):
# skip if an error occurred.
config = None
if config_tokenizer_class is None:
# Third attempt. If we have not yet found the original type of the tokenizer,
# we are loading we see if we can infer it from the type of the configuration file
from .models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES # tests_ignore
if hasattr(config, "model_type"):
model_type = config.model_type
else:
# Fallback: use pattern matching on the string.
model_type = None
for pattern in TOKENIZER_MAPPING_NAMES:
if pattern in str(pretrained_model_name_or_path):
model_type = pattern
break
if model_type is not None:
config_tokenizer_class = TOKENIZER_MAPPING_NAMES.get(model_type)
if config_tokenizer_class is not None:
if cls.__name__.replace("Fast", "") != config_tokenizer_class.replace("Fast", ""):
logger.warning(
"The tokenizer class you load from this checkpoint is not the same type as the class this"
" function is called from. It may result in unexpected tokenization. \nThe tokenizer class you"
f" load from this checkpoint is '{config_tokenizer_class}'. \nThe class this function is called"
f" from is '{cls.__name__}'."
)
# Preserve extra_special_tokens from tokenizer_config.json before updating with kwargs
# extra_special_tokens should be a list (user-defined extra tokens)
extra_special_tokens_from_config = init_kwargs.get("extra_special_tokens")
if isinstance(extra_special_tokens_from_config, (list, tuple)):
extra_special_tokens_from_config = list(extra_special_tokens_from_config)
else:
extra_special_tokens_from_config = None
# Update with newly provided kwargs
init_kwargs.update(kwargs)
# V5: Backward compatibility - convert old "additional_special_tokens" to "extra_special_tokens"
if "additional_special_tokens" in init_kwargs and "extra_special_tokens" not in init_kwargs:
init_kwargs["extra_special_tokens"] = init_kwargs.pop("additional_special_tokens")
# Restore extra_special_tokens from config if kwargs overwrote it or it's missing
elif extra_special_tokens_from_config is not None:
if "extra_special_tokens" not in init_kwargs or not isinstance(
init_kwargs.get("extra_special_tokens"), (list, tuple)
):
init_kwargs["extra_special_tokens"] = extra_special_tokens_from_config
# V5: Get model-specific special tokens from config (saved as individual keys in special_tokens_map)
# These need to be grouped as extra_special_tokens dict so __init__ can save them to attributes
if "extra_special_tokens" not in init_kwargs or not isinstance(init_kwargs.get("extra_special_tokens"), dict):
default_attrs = set(cls.SPECIAL_TOKENS_ATTRIBUTES)
model_specific_tokens = {
key: init_kwargs.pop(key)
for key in list(init_kwargs.keys())
if key not in default_attrs
and key.endswith("_token")
and isinstance(init_kwargs[key], (str, AddedToken))
}
if model_specific_tokens:
# If extra_special_tokens is already a list, we need to preserve it
if "extra_special_tokens" in init_kwargs and isinstance(
init_kwargs["extra_special_tokens"], (list, tuple)
):
# Keep the list as is, but also add model-specific tokens as a separate dict
# Convert to model_specific_special_tokens so __init__ handles it
init_kwargs["model_specific_special_tokens"] = model_specific_tokens
else:
init_kwargs["extra_special_tokens"] = model_specific_tokens
elif isinstance(init_kwargs.get("extra_special_tokens"), dict):
# If extra_special_tokens is already a dict, convert it to model_specific_special_tokens
# so __init__ handles it properly
init_kwargs["model_specific_special_tokens"] = init_kwargs.pop("extra_special_tokens")
# Merge resolved_vocab_files arguments in init_kwargs.
added_tokens_file = resolved_vocab_files.pop("added_tokens_file", None)
special_tokens_map_file = resolved_vocab_files.pop("special_tokens_map_file", None)
for args_name, file_path in resolved_vocab_files.items():
if args_name not in init_kwargs or init_kwargs[args_name] is None:
init_kwargs[args_name] = file_path
tokenizer_file = resolved_vocab_files.get("tokenizer_file", None)
if slow_tokenizer is not None:
init_kwargs["__slow_tokenizer"] = slow_tokenizer
init_kwargs["name_or_path"] = pretrained_model_name_or_path
init_kwargs["is_local"] = _is_local
#### Handle tokenizer serialization of added and special tokens
added_tokens_decoder: dict[int, AddedToken] = {}
added_tokens_map: dict[str, AddedToken] = {}
# if we have info on the slow added tokens
if "added_tokens_decoder" in init_kwargs:
for idx, token in init_kwargs["added_tokens_decoder"].items():
if isinstance(token, dict):
token = AddedToken(**token)
if isinstance(token, AddedToken):
added_tokens_decoder[int(idx)] = token
added_tokens_map[str(token)] = token
else:
raise TypeError(
f"Found a {token.__class__} in the saved `added_tokens_decoder`, should be a dictionary or an AddedToken instance"
)
else:
# begin legacy: read the added_tokens_file and update kwargs with special_tokens_map if modified
if special_tokens_map_file is not None:
with open(special_tokens_map_file, encoding="utf-8") as special_tokens_map_handle:
special_tokens_map = json.load(special_tokens_map_handle)
# Preserve extra_special_tokens from tokenizer_config.json before processing special_tokens_map
extra_special_tokens_before_map = init_kwargs.get("extra_special_tokens")
if isinstance(extra_special_tokens_before_map, (list, tuple)):
extra_special_tokens_before_map = list(extra_special_tokens_before_map)
else:
extra_special_tokens_before_map = None
for key, value in special_tokens_map.items():
if key in kwargs and kwargs[key]:
# This value has already been redefined by the kwargs
# We keep this new value and ignore the one stored in the special_tokens_map_file
continue
# V5: Convert dict-format tokens to AddedToken
if isinstance(value, dict):
value["special"] = True
value = AddedToken(**value)
elif key == "extra_special_tokens":
# Handle extra_special_tokens from special_tokens_map.json
if isinstance(value, dict):
# Dict format for model-specific tokens - keep as is
init_kwargs[key] = value
continue
elif isinstance(value, list):
# List format - merge with existing if present
existing = init_kwargs.pop("extra_special_tokens", []) or []
if not isinstance(existing, (list, tuple)):
existing = []
for token in value:
if isinstance(token, dict):
token = AddedToken(**token, special=True)
if token not in existing:
existing.append(token)
init_kwargs[key] = existing
continue
init_kwargs[key] = value
# Restore extra_special_tokens from tokenizer_config.json if not in special_tokens_map.json
if (
"extra_special_tokens" not in special_tokens_map
and extra_special_tokens_before_map is not None
):
if "extra_special_tokens" not in init_kwargs or not isinstance(
init_kwargs.get("extra_special_tokens"), (list, tuple)
):
init_kwargs["extra_special_tokens"] = extra_special_tokens_before_map
# Convert extra_special_tokens dict to model_specific_special_tokens if it's a dict
if isinstance(init_kwargs.get("extra_special_tokens"), dict):
init_kwargs["model_specific_special_tokens"] = init_kwargs.pop("extra_special_tokens")
# slow -> slow|fast, legacy: convert the `"added_tokens.json"` file to `added_tokens_decoder`.
# this is for legacy purpose. We don't add the tokens after init for efficiency.
if added_tokens_file is not None:
special_tokens = []
# V5: Check both named and extra special tokens
for key in cls.SPECIAL_TOKENS_ATTRIBUTES:
if key in init_kwargs and init_kwargs[key] is not None:
special_tokens.append(str(init_kwargs[key]))
# Handle extra_special_tokens
if "extra_special_tokens" in init_kwargs and init_kwargs["extra_special_tokens"] is not None:
special_tokens += [str(token) for token in init_kwargs["extra_special_tokens"]]
with open(added_tokens_file, encoding="utf-8") as added_tokens_handle:
added_tok_encoder = json.load(added_tokens_handle)
for str_token, index in added_tok_encoder.items():
# if index not in added_tokens_decoder and str_token not in added_tokens_map:
special = str_token in special_tokens
added_tokens_decoder[index] = AddedToken(
str_token, rstrip=False, lstrip=False, normalized=not special, special=special
)
added_tokens_map[str(token)] = added_tokens_decoder[index]
# allows converting a fast -> slow: add the `tokenizer.json`'s `"added_tokens"` to the slow tokenizer
# if `tokenizer_config.json` is `None`
if tokenizer_file is not None:
# This is for slow so can be done before
with open(tokenizer_file, encoding="utf-8") as tokenizer_file_handle:
tokenizer_file_handle = json.load(tokenizer_file_handle)
added_tokens = tokenizer_file_handle.pop("added_tokens")
for serialized_tokens in added_tokens:
idx = serialized_tokens.pop("id")
added_tokens_decoder[idx] = AddedToken(**serialized_tokens)
added_tokens_map[str(added_tokens_decoder[idx])] = added_tokens_decoder[idx]
# end legacy
# Passing AddedTokens and not strings to the class to prevent it from casting the string to a different AddedToken
# convert {'__type': 'AddedToken', 'content': '<ent>', 'lstrip': False, 'normalized': True, ...} to AddedTokens
init_kwargs["added_tokens_decoder"] = added_tokens_decoder
init_kwargs = cls.convert_added_tokens(init_kwargs, save=False)
# V5: Map special tokens from added_tokens_map (named tokens only)
for key in cls.SPECIAL_TOKENS_ATTRIBUTES:
if key in init_kwargs and added_tokens_map != {} and init_kwargs[key] is not None:
init_kwargs[key] = added_tokens_map.get(str(init_kwargs[key]), init_kwargs[key])
# Track which files were loaded (if not already set by AutoTokenizer)
if "files_loaded" not in init_kwargs:
files_loaded = []
# Check which files this tokenizer class actually uses based on vocab_files_names
tokenizer_needs_files = set(cls.vocab_files_names.keys()) if hasattr(cls, "vocab_files_names") else set()
# If tokenizer_file is in the class's vocab_files_names and exists, prioritize it (TokenizersBackend)
if "tokenizer_file" in tokenizer_needs_files and resolved_vocab_files.get("tokenizer_file"):
files_loaded.append(os.path.basename(resolved_vocab_files["tokenizer_file"]))
else:
# Otherwise, add the actual vocab files that were used by this tokenizer class
for file_key, file_path in resolved_vocab_files.items():
if (
file_path
and file_key not in ["tokenizer_config_file", "special_tokens_map_file", "added_tokens_file"]
and file_key in tokenizer_needs_files
):
# Extract just the filename from the path
files_loaded.append(os.path.basename(file_path))
init_kwargs["files_loaded"] = files_loaded
# Instantiate the tokenizer.
try:
tokenizer = cls(*init_inputs, **init_kwargs)
except import_protobuf_decode_error():
raise RuntimeError(
"Unable to load tokenizer model from SPM, loading from TikToken will be attempted instead."
"(Google protobuf error: Tried to load SPM model with non-SPM vocab file).",
)
except RuntimeError as e:
if "sentencepiece_processor.cc" in str(e):
raise RuntimeError(
"Unable to load tokenizer model from SPM, loading from TikToken will be attempted instead."
"(SentencePiece RuntimeError: Tried to load SPM model with non-SPM vocab file).",
) from e
else:
raise e
except OSError:
raise OSError(
"Unable to load vocabulary from file. "
"Please check that the provided vocabulary is accessible and not corrupted."
)
# If tokenizer_file exists and tokenizer has a TokenizersBackend, replace the blank tokenizer with tokenizer.json
if tokenizer_file is not None and hasattr(tokenizer, "_tokenizer"):
from tokenizers import Tokenizer as TokenizerFast
tokenizer._tokenizer = TokenizerFast.from_file(tokenizer_file)
# Re-run post-initialization if the tokenizer has it
if hasattr(tokenizer, "_post_init"):
tokenizer._post_init()
# If only SPM exists, try to get vocab and merges and init to load a tokenizers-backend
else:
spm_filename = find_sentencepiece_model_file(
pretrained_model_name_or_path,
revision=kwargs.get("revision"),
token=kwargs.get("token"),
cache_dir=kwargs.get("cache_dir"),
local_files_only=kwargs.get("local_files_only", False),
subfolder=kwargs.get("subfolder", ""),
)
if spm_filename is not None:
try:
resolved_spm = cached_file(
pretrained_model_name_or_path,
spm_filename,
cache_dir=kwargs.get("cache_dir"),
force_download=kwargs.get("force_download", False),
proxies=kwargs.get("proxies"),
token=kwargs.get("token"),
revision=kwargs.get("revision"),
local_files_only=kwargs.get("local_files_only", False),
subfolder=kwargs.get("subfolder", ""),
)
except Exception:
resolved_spm = None
if resolved_spm is not None:
try:
# Mirror AutoTokenizer fallback: extract vocab/merges from SentencePiece
import inspect as _inspect
from .tokenization_utils_sentencepiece import SentencePieceExtractor
class_sig = _inspect.signature(getattr(cls, "__init__", cls))
vocab_ids, vocab_scores, merges = SentencePieceExtractor(resolved_spm).extract()
files_loaded = [spm_filename]
init_kwargs["backend"] = "tokenizers"
init_kwargs["files_loaded"] = files_loaded
# If tokenizer needs merges too (BPE), pass both; unigram models only need vocab
if "merges" in class_sig.parameters:
return cls.from_pretrained(
pretrained_model_name_or_path,
*init_inputs,
vocab=vocab_scores,
merges=merges,
**init_kwargs,
)
elif "vocab" in class_sig.parameters:
return cls.from_pretrained(
pretrained_model_name_or_path,
*init_inputs,
vocab=vocab_scores,
**init_kwargs,
)
except Exception as e:
logger.warning(
f"Could not extract vocab/merges from the SentencePiece model to initialize a Tokenizers backend: {e}. We are falling back so we are falling back to the standard loading method."
)
pass
# Fallback to vocab.json + merges.txt (BPE) or just vocab.json (WordLevel/WordPiece)
vocab, merges, files_loaded = load_vocab_and_merges(
pretrained_model_name_or_path,
cache_dir=kwargs.get("cache_dir"),
force_download=kwargs.get("force_download", False),
proxies=kwargs.get("proxies"),
token=kwargs.get("token"),
revision=kwargs.get("revision"),
local_files_only=kwargs.get("local_files_only", False),
subfolder=kwargs.get("subfolder", ""),
)
if vocab is not None:
try:
import inspect as _inspect
class_sig = _inspect.signature(getattr(cls, "__init__", cls))
init_kwargs["backend"] = "tokenizers"
init_kwargs["files_loaded"] = files_loaded
if merges is not None and "merges" in class_sig.parameters:
return cls.from_pretrained(
pretrained_model_name_or_path,
*init_inputs,
vocab=vocab,
merges=merges,
**init_kwargs,
)
elif "vocab" in class_sig.parameters:
return cls.from_pretrained(
pretrained_model_name_or_path,
*init_inputs,
vocab=vocab,
**init_kwargs,
)
except Exception:
pass
if added_tokens_decoder != {} and max(list(added_tokens_decoder.keys())[-1], 0) > tokenizer.vocab_size:
logger.info(
"Special tokens have been added in the vocabulary, make sure the associated word embeddings are"
" fine-tuned or trained."
)
return tokenizer
@classmethod
def convert_added_tokens(cls, obj: Union[AddedToken, Any], save=False, add_type_field=True):
if isinstance(obj, dict) and "__type" in obj and obj["__type"] == "AddedToken":
obj.pop("__type")
return AddedToken(**obj)
if isinstance(obj, AddedToken) and save:
obj = obj.__getstate__()
if add_type_field:
obj["__type"] = "AddedToken"
else:
# Don't save "special" for previous tokenizers
obj.pop("special")
return obj
elif isinstance(obj, (list, tuple)):
return [cls.convert_added_tokens(o, save=save, add_type_field=add_type_field) for o in obj]
elif isinstance(obj, dict):
return {k: cls.convert_added_tokens(v, save=save, add_type_field=add_type_field) for k, v in obj.items()}
return obj
    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        legacy_format: Optional[bool] = None,
        filename_prefix: Optional[str] = None,
        push_to_hub: bool = False,
        **kwargs,
    ) -> tuple[str, ...]:
        """
        Save the full tokenizer state.

        This method makes sure the full tokenizer can then be re-loaded using the
        [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`] class method.

        Warning: This won't save modifications you may have applied to the tokenizer after the instantiation (for
        instance, modifying `tokenizer.do_lower_case` after creation).

        Args:
            save_directory (`str` or `os.PathLike`): The path to a directory where the tokenizer will be saved.
            legacy_format (`bool`, *optional*):
                Only applicable for a fast tokenizer. If unset (default), will save the tokenizer in the unified JSON
                format as well as in legacy format if it exists, i.e. with tokenizer specific vocabulary and a separate
                added_tokens files.
                If `False`, will only save the tokenizer in the unified JSON format. This format is incompatible with
                "slow" tokenizers (not powered by the *tokenizers* library), so the tokenizer will not be able to be
                loaded in the corresponding "slow" tokenizer.
                If `True`, will save the tokenizer in legacy format. If the "slow" tokenizer doesn't exist, a value
                error is raised.
            filename_prefix (`str`, *optional*):
                A prefix to add to the names of the files saved by the tokenizer.
            push_to_hub (`bool`, *optional*, defaults to `False`):
                Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
                repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
                namespace).
            kwargs (`dict[str, Any]`, *optional*):
                Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.

        Returns:
            A tuple of `str`: The files saved.
        """
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)

        if push_to_hub:
            # Snapshot hub options and file timestamps now; the upload happens after all files are written.
            commit_message = kwargs.pop("commit_message", None)
            repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
            repo_id = create_repo(repo_id, exist_ok=True, **kwargs).repo_id
            files_timestamps = self._get_files_timestamps(save_directory)
        tokenizer_config_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + TOKENIZER_CONFIG_FILE
        )
        tokenizer_config = copy.deepcopy(self.init_kwargs)
        # Start from the init kwargs, then refresh every key (plus model_max_length) with the current
        # attribute value when the instance carries one.
        target_keys = set(self.init_kwargs.keys())
        target_keys.update(["model_max_length"])
        for k in target_keys:
            if hasattr(self, k):
                tokenizer_config[k] = getattr(self, k)
        # Let's make sure we properly save the special tokens
        # V5: Save both named tokens and extra tokens
        tokenizer_config.update(self.special_tokens_map)
        if self._extra_special_tokens:
            tokenizer_config["extra_special_tokens"] = self.extra_special_tokens
        save_jinja_files = kwargs.get("save_jinja_files", True)
        tokenizer_config, saved_raw_chat_template_files = self.save_chat_templates(
            save_directory, tokenizer_config, filename_prefix, save_jinja_files
        )
        if len(self.init_inputs) > 0:
            tokenizer_config["init_inputs"] = copy.deepcopy(self.init_inputs)
        # Vocabulary file paths are re-resolved at load time; don't persist them in the config.
        for file_id in self.vocab_files_names:
            tokenizer_config.pop(file_id, None)
        # Serialize AddedToken instances into tagged dicts (`"__type": "AddedToken"`) so they round-trip.
        tokenizer_config = self.convert_added_tokens(tokenizer_config, add_type_field=True, save=True)
        # Process added tokens separately: allows previous versions to ignore it!
        added_tokens = {}
        for key, value in self.added_tokens_decoder.items():
            added_tokens[key] = value.__getstate__()
        tokenizer_config["added_tokens_decoder"] = added_tokens
        # Add tokenizer class to the tokenizer config to be able to reload it with from_pretrained
        tokenizer_class = self.__class__.__name__
        # tokenizers backend don't need to save added_tokens_decoder
        if any(base.__name__ == "TokenizersBackend" for base in self.__class__.__mro__):
            tokenizer_config.pop("added_tokens_decoder", None)
        # Remove the Fast at the end if we can save the slow tokenizer
        if tokenizer_class.endswith("Fast") and getattr(self, "can_save_slow_tokenizer", False):
            tokenizer_class = tokenizer_class[:-4]
        tokenizer_config["tokenizer_class"] = tokenizer_class
        if getattr(self, "_auto_map", None) is not None:
            tokenizer_config["auto_map"] = self._auto_map
        if getattr(self, "_processor_class", None) is not None:
            tokenizer_config["processor_class"] = self._processor_class
        # Runtime-only bookkeeping; never persisted.
        tokenizer_config.pop("files_loaded", None)

        # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
        # loaded from the Hub.
        if self._auto_class is not None:
            custom_object_save(self, save_directory, config=tokenizer_config)

        # remove private information
        if "name_or_path" in tokenizer_config:
            tokenizer_config.pop("name_or_path")
            tokenizer_config.pop("special_tokens_map_file", None)
            tokenizer_config.pop("tokenizer_file", None)
        if "device_map" in tokenizer_config:
            tokenizer_config.pop("device_map")
        if "slow_tokenizer_class" in tokenizer_config:
            tokenizer_config.pop("slow_tokenizer_class")

        with open(tokenizer_config_file, "w", encoding="utf-8") as f:
            out_str = json.dumps(tokenizer_config, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
            f.write(out_str)
        logger.info(f"tokenizer config file saved in {tokenizer_config_file}")

        # Delegate vocabulary / added-tokens serialization to the backend-specific `_save_pretrained`.
        file_names = (tokenizer_config_file, *saved_raw_chat_template_files)
        save_files = self._save_pretrained(
            save_directory=save_directory,
            file_names=file_names,
            legacy_format=legacy_format,
            filename_prefix=filename_prefix,
        )

        if push_to_hub:
            self._upload_modified_files(
                save_directory,
                repo_id,
                files_timestamps,
                commit_message=commit_message,
                token=kwargs.get("token"),
            )

        return save_files
def _save_pretrained(
self,
save_directory: Union[str, os.PathLike],
file_names: tuple[str, ...],
legacy_format: Optional[bool] = None,
filename_prefix: Optional[str] = None,
) -> tuple[str, ...]:
"""
Save a tokenizer using the slow-tokenizer/legacy format: vocabulary + added tokens.
Fast tokenizers can also be saved in a unique JSON file containing {config + vocab + added-tokens} using the
specific [`~tokenization_utils_tokenizers.PreTrainedTokenizerFast._save_pretrained`]
"""
if legacy_format is False:
raise ValueError(
"Only fast tokenizers (instances of PreTrainedTokenizerFast) can be saved in non legacy format."
)
save_directory = str(save_directory)
added_tokens_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + ADDED_TOKENS_FILE
)
# the new get_added_vocab() also returns special tokens and tokens that have an index < vocab_size
added_vocab = {tok: index for tok, index in self.added_tokens_encoder.items() if index >= self.vocab_size}
if added_vocab:
with open(added_tokens_file, "w", encoding="utf-8") as f:
out_str = json.dumps(added_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n"
f.write(out_str)
logger.info(f"added tokens file saved in {added_tokens_file}")
vocab_files = self.save_vocabulary(save_directory, filename_prefix=filename_prefix)
return file_names + vocab_files + (added_tokens_file,)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str, ...]:
        """
        Save only the vocabulary of the tokenizer (vocabulary + added tokens).

        This method won't save the configuration and special token mappings of the tokenizer. Use
        [`~PreTrainedTokenizerFast._save_pretrained`] to save the whole state of the tokenizer.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.
            filename_prefix (`str`, *optional*):
                An optional prefix to add to the names of the saved files.

        Returns:
            `tuple(str)`: Paths to the files saved.
        """
        # Abstract: each backend-specific subclass knows its own vocabulary file format.
        raise NotImplementedError
    def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> list[str]:
        """
        Converts a string into a sequence of tokens, replacing unknown tokens with the `unk_token`.

        Args:
            text (`str`):
                The sequence to be encoded.
            pair (`str`, *optional*):
                A second sequence to be encoded with the first.
            add_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to add the special tokens associated with the corresponding model.
            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific encode method. See details in
                [`~PreTrainedTokenizerBase.__call__`]

        Returns:
            `list[str]`: The list of tokens.
        """
        # Abstract: the actual text -> token split is implemented by backend-specific subclasses.
        raise NotImplementedError
@add_end_docstrings(
ENCODE_KWARGS_DOCSTRING,
"""
**kwargs: Passed along to the `.tokenize()` method.
""",
"""
Returns:
`list[int]`, `torch.Tensor`, or `np.ndarray`: The tokenized ids of the text.
""",
)
def encode(
self,
text: Union[TextInput, PreTokenizedInput, EncodedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy, None] = None,
max_length: Optional[int] = None,
stride: int = 0,
padding_side: Optional[str] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs,
) -> list[int]:
"""
Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary.
Same as doing `self.convert_tokens_to_ids(self.tokenize(text))`.
Args:
text (`str`, `list[str]` or `list[int]`):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
`tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method).
text_pair (`str`, `list[str]` or `list[int]`, *optional*):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method).
"""
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=kwargs.get("pad_to_multiple_of"),
verbose=kwargs.get("verbose", True),
**kwargs,
)
encoded_inputs = self._encode_plus(
text,
text_pair=text_pair,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
padding_side=padding_side,
return_tensors=return_tensors,
**kwargs,
)
return encoded_inputs["input_ids"]
    def num_special_tokens_to_add(self, pair: bool = False) -> int:
        """
        Return the number of special tokens added when encoding a single sequence (`pair=False`) or a pair of
        sequences (`pair=True`). Must be implemented by subclasses; used e.g. to derive `max_len_single_sentence`.
        """
        raise NotImplementedError
@property
def max_len_single_sentence(self) -> int:
"""
`int`: The maximum length of a sentence that can be fed to the model.
"""
return self.model_max_length - self.num_special_tokens_to_add(pair=False)
@max_len_single_sentence.setter
def max_len_single_sentence(self, value) -> None:
# For backward compatibility, allow to try to setup 'max_len_single_sentence'.
if value == self.model_max_length - self.num_special_tokens_to_add(pair=False) and self.verbose:
if not self.deprecation_warnings.get("max_len_single_sentence", False):
logger.warning(
"Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up."
)
self.deprecation_warnings["max_len_single_sentence"] = True
else:
raise ValueError(
"Setting 'max_len_single_sentence' is now deprecated. This value is automatically set up."
)
@property
def max_len_sentences_pair(self) -> int:
"""
`int`: The maximum combined length of a pair of sentences that can be fed to the model.
"""
return self.model_max_length - self.num_special_tokens_to_add(pair=True)
@max_len_sentences_pair.setter
def max_len_sentences_pair(self, value) -> None:
# For backward compatibility, allow to try to setup 'max_len_sentences_pair'.
if value == self.model_max_length - self.num_special_tokens_to_add(pair=True) and self.verbose:
if not self.deprecation_warnings.get("max_len_sentences_pair", False):
logger.warning(
"Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up."
)
self.deprecation_warnings["max_len_sentences_pair"] = True
else:
raise ValueError("Setting 'max_len_sentences_pair' is now deprecated. This value is automatically set up.")
    def _get_padding_truncation_strategies(
        self, padding=False, truncation=None, max_length=None, pad_to_multiple_of=None, verbose=True, **kwargs
    ):
        """
        Resolve user-facing `padding`/`truncation`/`max_length` arguments into concrete
        `PaddingStrategy`/`TruncationStrategy` members plus an effective `max_length`.

        Returns:
            `tuple`: `(padding_strategy, truncation_strategy, max_length, kwargs)` where `kwargs` contains the
            unconsumed keyword arguments, passed back for the caller to forward.

        Raises:
            ValueError: if padding is requested without a usable pad token, or if truncation length is not a
                multiple of `pad_to_multiple_of` while both truncation and padding are active.
        """
        # Backward compatibility for previous behavior:
        # If you only set max_length, it activates truncation for max_length
        if max_length is not None and padding is False and truncation is None:
            truncation = "longest_first"

        # Get padding strategy
        if padding is not False:
            if padding is True:
                if verbose:
                    if max_length is not None and (
                        truncation is None or truncation is False or truncation == "do_not_truncate"
                    ):
                        warnings.warn(
                            "`max_length` is ignored when `padding`=`True` and there is no truncation strategy. "
                            "To pad to max length, use `padding='max_length'`."
                        )
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                # Accept plain strings/bools and coerce them into the enum.
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Get truncation strategy
        if truncation is not False and truncation is not None:
            if truncation is True:
                truncation_strategy = (
                    TruncationStrategy.LONGEST_FIRST
                )  # Default to truncate the longest sequences in pairs of inputs
            elif not isinstance(truncation, TruncationStrategy):
                # Accept plain strings and coerce them into the enum.
                truncation_strategy = TruncationStrategy(truncation)
            elif isinstance(truncation, TruncationStrategy):
                truncation_strategy = truncation
        else:
            truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE

        # Set max length if needed: fall back to model_max_length, but disable the strategy entirely when
        # model_max_length is the "no sensible limit" sentinel (> LARGE_INTEGER).
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                if self.model_max_length > LARGE_INTEGER:
                    padding_strategy = PaddingStrategy.DO_NOT_PAD
                else:
                    max_length = self.model_max_length

            if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE:
                if self.model_max_length > LARGE_INTEGER:
                    truncation_strategy = TruncationStrategy.DO_NOT_TRUNCATE
                else:
                    max_length = self.model_max_length

        # Test if we have a padding token
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.pad_token is None or self.pad_token_id < 0):
            raise ValueError(
                "Asking to pad but the tokenizer does not have a padding token. "
                "Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` "
                "or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`."
            )

        # Check that we will truncate to a multiple of pad_to_multiple_of if both are provided
        if (
            truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE
            and padding_strategy != PaddingStrategy.DO_NOT_PAD
            and pad_to_multiple_of is not None
            and max_length is not None
            and (max_length % pad_to_multiple_of != 0)
        ):
            raise ValueError(
                "Truncation and padding are both activated but "
                f"truncation length ({max_length}) is not a multiple of pad_to_multiple_of ({pad_to_multiple_of})."
            )

        return padding_strategy, truncation_strategy, max_length, kwargs
    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def __call__(
        self,
        text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput], None] = None,
        text_pair: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None,
        text_target: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput], None] = None,
        text_pair_target: Optional[
            Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]
        ] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy, None] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        tokenizer_kwargs: Optional[dict[str, Any]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
        sequences.

        Args:
            text (`str`, `list[str]`, `list[list[str]]`, *optional*):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            text_pair (`str`, `list[str]`, `list[list[str]]`, *optional*):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            text_target (`str`, `list[str]`, `list[list[str]]`, *optional*):
                The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
                list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
                you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            text_pair_target (`str`, `list[str]`, `list[list[str]]`, *optional*):
                The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
                list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
                you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            tokenizer_kwargs (`dict[str, Any]`, *optional*):
                Additional kwargs to pass to the tokenizer. These will be merged with the explicit parameters and
                other kwargs, with explicit parameters taking precedence.

        Returns:
            [`BatchEncoding`]: The encoded inputs. When both `text` and `text_target` are given, the target
            `input_ids` are attached under the `labels` key of the input encoding.
        """
        # Bundle every explicit encoding option into one dict so the input pass and the
        # target pass below are run with exactly the same settings.
        all_kwargs = {
            "add_special_tokens": add_special_tokens,
            "padding": padding,
            "truncation": truncation,
            "max_length": max_length,
            "stride": stride,
            "is_split_into_words": is_split_into_words,
            "pad_to_multiple_of": pad_to_multiple_of,
            "padding_side": padding_side,
            "return_tensors": return_tensors,
            "return_token_type_ids": return_token_type_ids,
            "return_attention_mask": return_attention_mask,
            "return_overflowing_tokens": return_overflowing_tokens,
            "return_special_tokens_mask": return_special_tokens_mask,
            "return_offsets_mapping": return_offsets_mapping,
            "return_length": return_length,
            "split_special_tokens": kwargs.pop("split_special_tokens", self.split_special_tokens),
            "verbose": verbose,
        }
        # Seq2seq-style override: only applied to the target encoding pass below.
        max_target_length = kwargs.pop("max_target_length", None)
        # First merge tokenizer_kwargs, then other kwargs (explicit params take precedence)
        if tokenizer_kwargs is not None:
            all_kwargs.update(tokenizer_kwargs)
        all_kwargs.update(kwargs)
        if text is None and text_target is None:
            raise ValueError("You need to specify either `text` or `text_target`.")
        # Resolve padding/truncation flags into concrete strategies once. The relevant keys
        # are popped out of `all_kwargs` so they are not forwarded twice to `_encode_plus`.
        # NOTE: `kwargs` is rebound here to whatever leftover kwargs the resolution returns.
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=all_kwargs.pop("padding", False),
            truncation=all_kwargs.pop("truncation", None),
            max_length=all_kwargs.pop("max_length", None),
            pad_to_multiple_of=all_kwargs.get("pad_to_multiple_of"),
            verbose=all_kwargs.get("verbose", True),
            **kwargs,
        )
        if text is not None:
            # The context manager will send the inputs as normal texts and not text_target, but we shouldn't change the
            # input mode in this case.
            if not self._in_target_context_manager and hasattr(self, "_switch_to_input_mode"):
                self._switch_to_input_mode()
            encodings = self._encode_plus(
                text=text,
                text_pair=text_pair,
                padding_strategy=padding_strategy,
                truncation_strategy=truncation_strategy,
                max_length=max_length,
                **all_kwargs,
            )
        if text_target is not None:
            # Targets may be tokenized differently (e.g. seq2seq tokenizers switch vocab/mode).
            if hasattr(self, "_switch_to_target_mode"):
                self._switch_to_target_mode()
            target_encodings = self._encode_plus(
                text=text_target,
                text_pair=text_pair_target,
                padding_strategy=padding_strategy,
                truncation_strategy=truncation_strategy,
                max_length=max_target_length if max_target_length is not None else max_length,
                **all_kwargs,
            )
        # Leave back tokenizer in input mode
        if hasattr(self, "_switch_to_input_mode"):
            self._switch_to_input_mode()
        if text_target is None:
            return encodings
        elif text is None:
            return target_encodings
        else:
            # Both inputs and targets provided: attach target ids as `labels`
            # (the conventional seq2seq training format).
            encodings["labels"] = target_encodings["input_ids"]
            return encodings
    def _encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput, EncodedInput],
        text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        is_split_into_words: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        split_special_tokens: bool = False,
        **kwargs,
    ) -> BatchEncoding:
        """
        Backend encoding hook invoked by `__call__` once padding/truncation flags have been
        resolved into concrete strategies. Subclasses must implement this method; the base
        class always raises `NotImplementedError`.
        """
        raise NotImplementedError
def pad(
self,
encoded_inputs: Union[
BatchEncoding,
list[BatchEncoding],
dict[str, EncodedInput],
dict[str, list[EncodedInput]],
list[dict[str, EncodedInput]],
],
padding: Union[bool, str, PaddingStrategy] = True,
max_length: Optional[int] = None,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_attention_mask: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
verbose: bool = True,
) -> BatchEncoding:
"""
Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length
in the batch.
Padding side (left/right) padding token ids are defined at the tokenizer level (with `self.padding_side`,
`self.pad_token_id` and `self.pad_token_type_id`).
Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the
text followed by a call to the `pad` method to get a padded encoding.
<Tip>
If the `encoded_inputs` passed are dictionary of numpy arrays, or PyTorch tensors, the
result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of
PyTorch tensors, you will lose the specific device of your tensors however.
</Tip>
Args:
encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `dict[str, list[int]]`, `dict[str, list[list[int]]` or `list[dict[str, list[int]]]`):
Tokenized inputs. Can represent one input ([`BatchEncoding`] or `dict[str, list[int]]`) or a batch of
tokenized inputs (list of [`BatchEncoding`], *dict[str, list[list[int]]]* or *list[dict[str,
list[int]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
collate function.
Instead of `list[int]` you can have tensors (numpy arrays, or PyTorch tensors), see
the note above for the return type.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side (`str`, *optional*):
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
verbose (`bool`, *optional*, defaults to `True`):
Whether or not to print more information and warnings.
"""
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if (
isinstance(encoded_inputs, (list, tuple))
and len(encoded_inputs) > 0
and isinstance(encoded_inputs[0], Mapping)
):
# Call .keys() explicitly for compatibility with TensorDict and other Mapping subclasses
encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()}
# The model's main input name, usually `input_ids`, has been passed for padding
if self.model_input_names[0] not in encoded_inputs:
raise ValueError(
"You should supply an encoding or a list of encodings to this method "
f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}"
)
required_input = encoded_inputs[self.model_input_names[0]]
if required_input is None or (isinstance(required_input, Sized) and len(required_input) == 0):
if return_attention_mask:
encoded_inputs["attention_mask"] = []
return encoded_inputs
# If we have PyTorch/NumPy tensors/arrays as inputs, we cast them as python objects
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
first_element = required_input[0]
if isinstance(first_element, (list, tuple)):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
for item in required_input:
if len(item) != 0:
first_element = item[0]
break
# At this state, if `first_element` is still a list/tuple, it's an empty one so there is nothing to do.
if not isinstance(first_element, (int, list, tuple)):
if is_torch_tensor(first_element):
return_tensors = "pt" if return_tensors is None else return_tensors
elif isinstance(first_element, np.ndarray):
return_tensors = "np" if return_tensors is None else return_tensors
else:
raise ValueError(
f"type of {first_element} unknown: {type(first_element)}. "
"Should be one of a python, numpy, or pytorch object."
)
for key, value in encoded_inputs.items():
encoded_inputs[key] = to_py_obj(value)
# Convert padding_strategy in PaddingStrategy
padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(
padding=padding, max_length=max_length, verbose=verbose
)
required_input = encoded_inputs[self.model_input_names[0]]
if required_input and not isinstance(required_input[0], (list, tuple)):
encoded_inputs = self._pad(
encoded_inputs,
max_length=max_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_attention_mask=return_attention_mask,
)
return BatchEncoding(encoded_inputs, tensor_type=return_tensors)
batch_size = len(required_input)
assert all(len(v) == batch_size for v in encoded_inputs.values()), (
"Some items in the output dictionary have a different batch size than others."
)
if padding_strategy == PaddingStrategy.LONGEST:
max_length = max(len(inputs) for inputs in required_input)
padding_strategy = PaddingStrategy.MAX_LENGTH
batch_outputs = {}
for i in range(batch_size):
inputs = {k: v[i] for k, v in encoded_inputs.items()}
outputs = self._pad(
inputs,
max_length=max_length,
padding_strategy=padding_strategy,
pad_to_multiple_of=pad_to_multiple_of,
padding_side=padding_side,
return_attention_mask=return_attention_mask,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
return BatchEncoding(batch_outputs, tensor_type=return_tensors)
def _pad(
self,
encoded_inputs: Union[dict[str, EncodedInput], BatchEncoding],
max_length: Optional[int] = None,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
pad_to_multiple_of: Optional[int] = None,
padding_side: Optional[str] = None,
return_attention_mask: Optional[bool] = None,
) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs:
Dictionary of tokenized inputs (`list[int]`) or batch of tokenized inputs (`list[list[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in `padding_side` argument:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
`>= 7.5` (Volta).
padding_side:
The side on which the model should have padding applied. Should be selected between ['right', 'left'].
Default value is picked from the class attribute of the same name.
return_attention_mask:
(optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
# Load from model defaults
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
required_input = encoded_inputs[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
# Initialize attention mask if not present.
if return_attention_mask and "attention_mask" not in encoded_inputs:
encoded_inputs["attention_mask"] = [1] * len(required_input)
if needs_to_be_padded:
difference = max_length - len(required_input)
padding_side = padding_side if padding_side is not None else self.padding_side
if padding_side == "right":
if return_attention_mask:
encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
)
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
elif padding_side == "left":
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
"token_type_ids"
]
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
else:
raise ValueError(f"Invalid padding strategy:{padding_side}")
return encoded_inputs
    def convert_tokens_to_string(self, tokens: list[str]) -> str:
        """
        Converts a sequence of tokens in a single string. The most simple way to do it is `" ".join(tokens)` but we
        often want to remove sub-word tokenization artifacts at the same time.

        Args:
            tokens (`list[str]`): The token to join in a string.

        Returns:
            `str`: The joined tokens.

        Raises:
            NotImplementedError: Always, in this base class; subclasses provide the implementation.
        """
        raise NotImplementedError
def decode(
self,
token_ids: Union[int, list[int], list[list[int]], np.ndarray, "torch.Tensor"],
skip_special_tokens: bool = False,
**kwargs,
) -> Union[str, list[str]]:
"""
Converts a sequence of ids into a string, or a list of sequences into a list of strings,
using the tokenizer and vocabulary with options to remove special tokens and clean up
tokenization spaces.
Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
Args:
token_ids (`Union[int, list[int], list[list[int]], np.ndarray, torch.Tensor]`):
A single sequence or a batch (list of sequences) of tokenized input ids. Can be obtained using the
`__call__` method.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific decode method.
Returns:
`Union[str, list[str]]`: The decoded string for a single sequence, or a list of decoded strings for a
batch of sequences.
"""
# Convert inputs to python lists
token_ids = to_py_obj(token_ids)
# If we received batched input, decode each sequence
if isinstance(token_ids, (list, tuple)) and len(token_ids) > 0 and isinstance(token_ids[0], (list, tuple)):
clean_up_tokenization_spaces = kwargs.pop("clean_up_tokenization_spaces", False)
return [
self._decode(
token_ids=seq,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
for seq in token_ids
]
return self._decode(
token_ids=token_ids,
skip_special_tokens=skip_special_tokens,
**kwargs,
)
def batch_decode(
self,
sequences: Union[list[int], list[list[int]], np.ndarray, "torch.Tensor"],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: Optional[bool] = None,
**kwargs,
) -> list[str]:
"""
Convert a list of lists of token ids into a list of strings by calling decode.
This method is provided for backwards compatibility. The `decode` method now handles batched input natively,
so you can use `decode` directly instead of `batch_decode`.
Args:
sequences (`Union[list[int], list[list[int]], np.ndarray, torch.Tensor]`):
List of tokenized input ids. Can be obtained using the `__call__` method.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (`bool`, *optional*):
Whether or not to clean up the tokenization spaces. If `None`, will default to
`self.clean_up_tokenization_spaces`.
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific decode method.
Returns:
`list[str]`: The list of decoded sentences.
"""
# Forward to decode() which now handles batched input natively
result = self.decode(
token_ids=sequences,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
# Ensure we always return a list for backwards compatibility
if isinstance(result, str):
return [result]
return result
    def _decode(
        self,
        token_ids: Union[int, list[int]],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: Optional[bool] = None,
        **kwargs,
    ) -> str:
        """
        Backend decoding hook invoked by `decode` for a single sequence. Subclasses must
        implement this method; the base class always raises `NotImplementedError`.
        """
        raise NotImplementedError
def _eventual_warn_about_too_long_sequence(self, ids: list[int], max_length: Optional[int], verbose: bool):
"""
Depending on the input and internal state we might trigger a warning about a sequence that is too long for its
corresponding model
Args:
ids (`list[str]`): The ids produced by the tokenization
max_length (`int`, *optional*): The max_length desired (does not trigger a warning if it is set)
verbose (`bool`): Whether or not to print more information and warnings.
"""
if max_length is None and len(ids) > self.model_max_length and verbose and self.model_max_length != 0:
if not self.deprecation_warnings.get("sequence-length-is-longer-than-the-specified-maximum", False):
logger.warning(
"Token indices sequence length is longer than the specified maximum sequence length "
f"for this model ({len(ids)} > {self.model_max_length}). Running this sequence through the model "
"will result in indexing errors"
)
self.deprecation_warnings["sequence-length-is-longer-than-the-specified-maximum"] = True
@classmethod
def register_for_auto_class(cls, auto_class="AutoTokenizer"):
"""
Register this class with a given auto class. This should only be used for custom tokenizers as the ones in the
library are already mapped with `AutoTokenizer`.
Args:
auto_class (`str` or `type`, *optional*, defaults to `"AutoTokenizer"`):
The auto class to register this new tokenizer with.
"""
if not isinstance(auto_class, str):
auto_class = auto_class.__name__
import transformers.models.auto as auto_module
if not hasattr(auto_module, auto_class):
raise ValueError(f"{auto_class} is not a valid auto class.")
cls._auto_class = auto_class
    def apply_chat_template(
        self,
        conversation: Union[list[dict[str, str]], list[list[dict[str, str]]]],
        tools: Optional[list[Union[dict, Callable]]] = None,
        documents: Optional[list[dict[str, str]]] = None,
        chat_template: Optional[str] = None,
        add_generation_prompt: bool = False,
        continue_final_message: bool = False,
        tokenize: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: bool = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_dict: bool = False,
        return_assistant_tokens_mask: bool = False,
        tokenizer_kwargs: Optional[dict[str, Any]] = None,
        **kwargs,
    ) -> Union[str, list[int], list[str], list[list[int]], BatchEncoding]:
        """
        Converts a list of dictionaries with `"role"` and `"content"` keys to a list of token
        ids. This method is intended for use with chat models, and will read the tokenizer's chat_template attribute to
        determine the format and control tokens to use when converting.

        Args:
            conversation (Union[list[dict[str, str]], list[list[dict[str, str]]]]): A list of dicts
                with "role" and "content" keys, representing the chat history so far.
            tools (`list[Union[Dict, Callable]]`, *optional*):
                A list of tools (callable functions) that will be accessible to the model. If the template does not
                support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema,
                giving the name, description and argument types for the tool. See our
                [tool use guide](https://huggingface.co/docs/transformers/en/chat_extras#passing-tools)
                for more information.
            documents (`list[dict[str, str]]`, *optional*):
                A list of dicts representing documents that will be accessible to the model if it is performing RAG
                (retrieval-augmented generation). If the template does not support RAG, this argument will have no
                effect. We recommend that each document should be a dict containing "title" and "text" keys.
            chat_template (`str`, *optional*):
                A Jinja template to use for this conversion. It is usually not necessary to pass anything to this
                argument, as the model's template will be used by default.
            add_generation_prompt (bool, *optional*):
                If this is set, a prompt with the token(s) that indicate
                the start of an assistant message will be appended to the formatted output. This is useful when you want to generate a response from the model.
                Note that this argument will be passed to the chat template, and so it must be supported in the
                template for this argument to have any effect.
            continue_final_message (bool, *optional*):
                If this is set, the chat will be formatted so that the final
                message in the chat is open-ended, without any EOS tokens. The model will continue this message
                rather than starting a new one. This allows you to "prefill" part of
                the model's response for it. Cannot be used at the same time as `add_generation_prompt`.
            tokenize (`bool`, defaults to `True`):
                Whether to tokenize the output. If `False`, the output will be a string.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
                Select a strategy to pad the returned sequences (according to the model's padding side and padding
                index) among:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence if provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).
            truncation (`bool`, defaults to `False`):
                Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`.
            max_length (`int`, *optional*):
                Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If
                not specified, the tokenizer's `max_length` attribute will be used as a default.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable
                values are:

                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
            return_dict (`bool`, defaults to `False`):
                Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`.
            tokenizer_kwargs (`dict[str: Any]`, *optional*): Additional kwargs to pass to the tokenizer.
            return_assistant_tokens_mask (`bool`, defaults to `False`):
                Whether to return a mask of the assistant generated tokens. For tokens generated by the assistant,
                the mask will contain 1. For user and system tokens, the mask will contain 0.
                This functionality is only available for chat templates that support it via the `{% generation %}` keyword.
            **kwargs: Additional kwargs to pass to the template renderer. Will be accessible by the chat template.

        Returns:
            `Union[list[int], Dict]`: A list of token ids representing the tokenized chat so far, including control tokens. This
            output is ready to pass to the model, either directly or via methods like `generate()`. If `return_dict` is
            set, will return a dict of tokenizer outputs instead.
        """
        # Validate mutually exclusive / dependent options up front.
        if return_dict and not tokenize:
            raise ValueError(
                "`return_dict=True` is incompatible with `tokenize=False`, because there is no dict "
                "of tokenizer outputs to return."
            )

        if return_assistant_tokens_mask and not return_dict:
            raise ValueError("`return_assistant_tokens_mask=True` is incompatible with `return_dict=False`")

        if tokenizer_kwargs is None:
            tokenizer_kwargs = {}

        chat_template = self.get_chat_template(chat_template, tools)

        # A batch is a list of conversations: either nested lists/tuples, or objects exposing
        # a `messages` attribute. A single conversation is wrapped so the code below can treat
        # everything uniformly and unwrap at the end.
        if isinstance(conversation, (list, tuple)) and (
            isinstance(conversation[0], (list, tuple)) or hasattr(conversation[0], "messages")
        ):
            conversations = conversation
            is_batched = True
        else:
            conversations = [conversation]
            is_batched = False

        if continue_final_message:
            if add_generation_prompt:
                raise ValueError(
                    "continue_final_message and add_generation_prompt are not compatible. Use continue_final_message when you want the model to continue the final message, and add_generation_prompt when you want to add a header that will prompt it to start a new assistant message instead."
                )
            if return_assistant_tokens_mask:
                raise ValueError("continue_final_message is not compatible with return_assistant_tokens_mask.")

        template_kwargs = {**self.special_tokens_map, **kwargs}  # kwargs overwrite special tokens if both are present
        # Render the Jinja chat template; `generation_indices` gives per-conversation character
        # ranges of assistant-generated spans (used for the assistant tokens mask below).
        rendered_chat, generation_indices = render_jinja_template(
            conversations=conversations,
            tools=tools,
            documents=documents,
            chat_template=chat_template,
            return_assistant_tokens_mask=return_assistant_tokens_mask,
            continue_final_message=continue_final_message,
            add_generation_prompt=add_generation_prompt,
            **template_kwargs,
        )

        if not is_batched:
            rendered_chat = rendered_chat[0]

        if tokenize:
            # Tokenize without adding special tokens: the chat template is responsible for any
            # control tokens it wants in the output.
            out = self(
                rendered_chat,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                add_special_tokens=False,
                return_tensors=return_tensors,
                **tokenizer_kwargs,
            )
            if return_dict:
                if return_assistant_tokens_mask:
                    assistant_masks = []
                    if is_batched or return_tensors:
                        input_ids = out["input_ids"]
                    else:
                        input_ids = [out["input_ids"]]
                    for i in range(len(input_ids)):
                        # Translate each assistant character span into token positions and
                        # mark those positions with 1 in the mask.
                        current_mask = [0] * len(input_ids[i])
                        for assistant_start_char, assistant_end_char in generation_indices[i]:
                            start_token = out.char_to_token(i, assistant_start_char)
                            end_token = out.char_to_token(i, assistant_end_char - 1)
                            if start_token is None:
                                # start_token is out of bounds maybe due to truncation.
                                break
                            for token_id in range(start_token, end_token + 1 if end_token else len(input_ids[i])):
                                current_mask[token_id] = 1
                        assistant_masks.append(current_mask)

                    if not is_batched and not return_tensors:
                        assistant_masks = assistant_masks[0]
                    out["assistant_masks"] = assistant_masks
                    if return_tensors:
                        # Re-run tensor conversion so `assistant_masks` is included.
                        out.convert_to_tensors(tensor_type=return_tensors)
                return out
            else:
                return out["input_ids"]
        else:
            return rendered_chat
def encode_message_with_chat_template(
self,
message: dict[str, str],
conversation_history: Optional[list[dict[str, str]]] = None,
**kwargs,
) -> list[int]:
"""
Tokenize a single message. This method is a convenience wrapper around `apply_chat_template` that allows you
to tokenize messages one by one. This is useful for things like token-by-token streaming.
This method is not guaranteed to be perfect. For some models, it may be impossible to robustly tokenize
single messages. For example, if the chat template adds tokens after each message, but also has a prefix that
is added to the entire chat, it will be impossible to distinguish a chat-start-token from a message-start-token.
In these cases, this method will do its best to find the correct tokenization, but it may not be perfect.
**Note:** This method does not support `add_generation_prompt`. If you want to add a generation prompt,
you should do it separately after tokenizing the conversation.
Args:
message (`dict`):
A dictionary with "role" and "content" keys, representing the message to tokenize.
conversation_history (`list[dict]`, *optional*):
A list of dicts with "role" and "content" keys, representing the chat history so far. If you are
tokenizing messages one by one, you should pass the previous messages in the conversation here.
**kwargs:
Additional kwargs to pass to the `apply_chat_template` method.
Returns:
`list[int]`: A list of token ids representing the tokenized message.
"""
if "add_generation_prompt" in kwargs:
raise ValueError(
"`encode_message_with_chat_template` does not support `add_generation_prompt`. Please add the generation prompt "
"separately."
)
if conversation_history is None or len(conversation_history) == 0:
return self.apply_chat_template([message], add_generation_prompt=False, tokenize=True, **kwargs)
conversation = conversation_history + [message]
tokens = self.apply_chat_template(conversation, add_generation_prompt=False, tokenize=True, **kwargs)
prefix_tokens = self.apply_chat_template(
conversation_history, add_generation_prompt=False, tokenize=True, **kwargs
)
# It's possible that the prefix tokens are not a prefix of the full list of tokens.
# For example, if the prefix is `<s>User: Hi` and the full conversation is `<s>User: Hi</s><s>Assistant: Hello`.
# In this case, we can't simply find the prefix, so we have to do something a bit more subtle.
# We look for the first place where the tokens differ, and that's our split point.
# This is not perfect, but it's the best we can do without a token-level API.
# To make this more robust, we could do a diff and find the longest common subsequence, but this is
# a good first approximation.
# This is particularly important for models like Llama3 that have changed their chat template to include
# EOS tokens after user messages.
min_len = min(len(prefix_tokens), len(tokens))
for i in range(min_len):
if prefix_tokens[i] != tokens[i]:
return tokens[i:]
return tokens[min_len:]
def get_chat_template(self, chat_template: Optional[str] = None, tools: Optional[list[dict]] = None) -> str:
"""
Retrieve the chat template string used for tokenizing chat messages. This template is used
internally by the `apply_chat_template` method and can also be used externally to retrieve the model's chat
template for better generation tracking.
Args:
chat_template (`str`, *optional*):
A Jinja template or the name of a template to use for this conversion.
It is usually not necessary to pass anything to this argument,
as the model's template will be used by default.
tools (`list[Dict]`, *optional*):
A list of tools (callable functions) that will be accessible to the model. If the template does not
support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema,
giving the name, description and argument types for the tool. See our
[chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use)
for more information.
Returns:
`str`: The chat template string.
"""
# First, handle the cases when the model has a dict of multiple templates
if isinstance(self.chat_template, dict):
template_dict = self.chat_template
if chat_template is not None and chat_template in template_dict:
# The user can pass the name of a template to the chat template argument instead of an entire template
chat_template = template_dict[chat_template]
elif chat_template is None:
if tools is not None and "tool_use" in template_dict:
chat_template = template_dict["tool_use"]
elif "default" in template_dict:
chat_template = template_dict["default"]
else:
raise ValueError(
"This model has multiple chat templates with no default specified! Please either pass a chat "
"template or the name of the template you wish to use to the `chat_template` argument. Available "
f"template names are {sorted(template_dict.keys())}."
)
elif chat_template is None:
# These are the cases when the model has a single template
# priority: `chat_template` argument > `tokenizer.chat_template`
if self.chat_template is not None:
chat_template = self.chat_template
else:
raise ValueError(
"Cannot use chat template functions because tokenizer.chat_template is not set and no template "
"argument was passed! For information about writing templates and setting the "
"tokenizer.chat_template attribute, please see the documentation at "
"https://huggingface.co/docs/transformers/main/en/chat_templating"
)
return chat_template
def save_chat_templates(
self,
save_directory: Union[str, os.PathLike],
tokenizer_config: dict,
filename_prefix: Optional[str],
save_jinja_files: bool,
):
"""
Writes chat templates out to the save directory if we're using the new format, and removes them from
the tokenizer config if present. If we're using the legacy format, it doesn't write any files, and instead
writes the templates to the tokenizer config in the correct format.
"""
chat_template_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + CHAT_TEMPLATE_FILE
)
chat_template_dir = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + CHAT_TEMPLATE_DIR
)
saved_raw_chat_template_files = []
if save_jinja_files and isinstance(self.chat_template, str):
# New format for single templates is to save them as chat_template.jinja
with open(chat_template_file, "w", encoding="utf-8") as f:
f.write(self.chat_template)
logger.info(f"chat template saved in {chat_template_file}")
saved_raw_chat_template_files.append(chat_template_file)
if "chat_template" in tokenizer_config:
tokenizer_config.pop("chat_template") # To ensure it doesn't somehow end up in the config too
elif save_jinja_files and isinstance(self.chat_template, dict):
# New format for multiple templates is to save the default as chat_template.jinja
# and the other templates in the chat_templates/ directory
for template_name, template in self.chat_template.items():
if template_name == "default":
with open(chat_template_file, "w", encoding="utf-8") as f:
f.write(self.chat_template["default"])
logger.info(f"chat template saved in {chat_template_file}")
saved_raw_chat_template_files.append(chat_template_file)
else:
Path(chat_template_dir).mkdir(exist_ok=True)
template_filepath = os.path.join(chat_template_dir, f"{template_name}.jinja")
with open(template_filepath, "w", encoding="utf-8") as f:
f.write(template)
logger.info(f"chat template saved in {template_filepath}")
saved_raw_chat_template_files.append(template_filepath)
if "chat_template" in tokenizer_config:
tokenizer_config.pop("chat_template") # To ensure it doesn't somehow end up in the config too
elif isinstance(self.chat_template, dict):
# Legacy format for multiple templates:
# chat template dicts are saved to the config as lists of dicts with fixed key names.
tokenizer_config["chat_template"] = [{"name": k, "template": v} for k, v in self.chat_template.items()]
elif self.chat_template is not None:
# Legacy format for single templates: Just make them a key in tokenizer_config.json
tokenizer_config["chat_template"] = self.chat_template
return tokenizer_config, saved_raw_chat_template_files
def get_fast_tokenizer_file(tokenization_files: list[str]) -> str:
    """
    Pick the tokenizer file appropriate for this version of transformers.

    File names may embed a minimum transformers version (matched by
    `_re_tokenizer_file`); the newest file whose version does not exceed the
    running `__version__` wins, defaulting to `FULL_TOKENIZER_FILE`.

    Args:
        tokenization_files (`list[str]`): The list of available configuration files.

    Returns:
        `str`: The tokenization file to use.
    """
    # Map each embedded version string to its file name.
    versioned_files = {}
    for name in tokenization_files:
        match = _re_tokenizer_file.search(name)
        if match is not None:
            versioned_files[match.groups()[0]] = name

    current_version = version.parse(__version__)
    chosen = FULL_TOKENIZER_FILE
    # Versions are sorted ascending: keep upgrading until one is too new.
    for candidate in sorted(versioned_files):
        if version.parse(candidate) > current_version:
            break
        chosen = versioned_files[candidate]
    return chosen
# Shared helper to locate a SentencePiece model file for a repo/path
def find_sentencepiece_model_file(pretrained_model_name_or_path, **kwargs):
    """
    Find any .model file (SentencePiece model) in the model directory or Hub repo.

    Tries known filenames first ("tokenizer.model", "spm.model"), then scans a
    local directory, and as a last resort lists files on the Hub to find any
    .model. All lookups are best-effort: failures are swallowed and the next
    strategy is tried.

    Returns:
        The filename (str) relative to the repo root or directory if found, else None.
    """
    from .utils.hub import has_file

    revision = kwargs.get("revision")
    token = kwargs.get("token")
    subfolder = kwargs.get("subfolder", "")
    local_files_only = kwargs.get("local_files_only", False)

    # Strategy 1: probe the common, well-known filenames directly.
    for well_known in ("tokenizer.model", "spm.model"):
        try:
            found = has_file(
                pretrained_model_name_or_path,
                well_known,
                revision=revision,
                token=token,
                cache_dir=kwargs.get("cache_dir"),
                local_files_only=local_files_only,
            )
        except Exception:
            # Best-effort: a lookup failure just means "try the next strategy".
            continue
        if found:
            return well_known

    # Strategy 2: scan a local directory for any *.model file.
    if os.path.isdir(pretrained_model_name_or_path):
        scan_dir = (
            os.path.join(pretrained_model_name_or_path, subfolder) if subfolder else pretrained_model_name_or_path
        )
        if os.path.isdir(scan_dir):
            for entry_name in os.listdir(scan_dir):
                if entry_name.endswith(".model"):
                    return os.path.join(subfolder, entry_name) if subfolder else entry_name

    # Strategy 3: list the Hub repo (only when remote lookups are allowed).
    if not local_files_only:
        try:
            from huggingface_hub import list_repo_tree

            listing = list_repo_tree(
                repo_id=pretrained_model_name_or_path,
                revision=revision,
                path_in_repo=subfolder if subfolder else None,
                recursive=False,
                token=token,
            )
            for entry in listing:
                if entry.path.endswith(".model"):
                    # Return the name relative to the subfolder, matching the local path shape.
                    return entry.path.removeprefix(f"{subfolder}/") if subfolder else entry.path
        except Exception:
            pass
    return None
def load_vocab_and_merges(pretrained_model_name_or_path, **kwargs):
    """
    Resolve and load tokenizer vocabulary files from a repo/path.

    Priority order:
      1. Load ``vocab.json`` (WordLevel/WordPiece/BPE fast tokenizers)
      2. Load ``vocab.txt`` when only a WordPiece vocab is available
      3. Optionally load ``merges.txt`` (BPE tokenizers)

    All resolution and parsing is best-effort: any failure yields ``None`` for
    that artifact rather than raising.

    Returns:
        tuple (vocab: dict|None, merges: list[tuple[str,str]]|None, files_loaded: list[str])
    """

    def _fetch(filename):
        # Resolve one file from the repo/path with the shared download options;
        # returns None when the file is missing or resolution fails.
        try:
            return cached_file(
                pretrained_model_name_or_path,
                filename,
                cache_dir=kwargs.get("cache_dir"),
                force_download=kwargs.get("force_download", False),
                proxies=kwargs.get("proxies"),
                token=kwargs.get("token"),
                revision=kwargs.get("revision"),
                local_files_only=kwargs.get("local_files_only", False),
                subfolder=kwargs.get("subfolder", ""),
            )
        except Exception:
            return None

    files_loaded = []
    vocab = None
    merges = None

    # 1) Preferred: JSON vocabulary.
    resolved_vocab_file = _fetch("vocab.json")
    if resolved_vocab_file is not None:
        try:
            with open(resolved_vocab_file, "r", encoding="utf-8") as vf:
                vocab = json.load(vf)
            files_loaded.append("vocab.json")
        except Exception:
            vocab = None

    # 2) Fallback: vocab.txt (WordPiece-style, one token per line; index = line number).
    if vocab is None:
        resolved_vocab_txt = _fetch("vocab.txt")
        if resolved_vocab_txt is not None:
            try:
                vocab = OrderedDict()
                with open(resolved_vocab_txt, "r", encoding="utf-8") as vf:
                    for index, token in enumerate(vf):
                        vocab[token.rstrip("\n")] = index
                files_loaded.append("vocab.txt")
            except Exception:
                vocab = None

    # 3) Optional: BPE merges. Comment lines (e.g. the "#version" header) and
    # malformed lines are skipped.
    resolved_merges_file = _fetch("merges.txt")
    if resolved_merges_file is not None:
        try:
            merges = []
            with open(resolved_merges_file, "r", encoding="utf-8") as mf:
                for line in mf:
                    line = line.strip()
                    if line and not line.startswith("#"):
                        parts = line.split()
                        if len(parts) == 2:
                            merges.append((parts[0], parts[1]))
            files_loaded.append("merges.txt")
        except Exception:
            merges = None
    return vocab, merges, files_loaded
# To update the docstring, we need to copy the method, otherwise we change the original docstring.
# NOTE(review): `push_to_hub.__doc__` presumably contains `{object}`/`{object_class}`/`{object_files}`
# placeholders shared with other saveable classes — copying the function first keeps the
# tokenizer-specific substitution from leaking into them. Confirm against the mixin that
# defines `push_to_hub`.
PreTrainedTokenizerBase.push_to_hub = copy_func(PreTrainedTokenizerBase.push_to_hub)
if PreTrainedTokenizerBase.push_to_hub.__doc__ is not None:
    PreTrainedTokenizerBase.push_to_hub.__doc__ = PreTrainedTokenizerBase.push_to_hub.__doc__.format(
        object="tokenizer", object_class="AutoTokenizer", object_files="tokenizer files"
    )
def _get_prepend_scheme(add_prefix_space: bool, original_tokenizer) -> str:
if add_prefix_space:
prepend_scheme = "always"
if not getattr(original_tokenizer, "legacy", True):
prepend_scheme = "first"
else:
prepend_scheme = "never"
return prepend_scheme
def generate_merges(vocab, vocab_scores: Optional[dict[str, float]] = None):
    """
    Reconstruct a BPE merge list from a vocabulary.

    For every token, every split into two pieces that are both in `vocab` is a
    candidate merge. Candidates for one token are ordered by the vocab ranks of
    their pieces; the full list is then ordered by (score, left length, right
    length) — ascending when ranking by vocab ids, descending when explicit
    `vocab_scores` are given (higher score = earlier merge).

    Args:
        vocab: Mapping from token string to its id/rank.
        vocab_scores: Optional mapping from token string to a merge score.

    Returns:
        list[tuple[str, str]]: Ordered (left, right) merge pairs.
    """
    use_scores = vocab_scores is not None
    # Rank by explicit scores when given, otherwise by the vocab ids themselves.
    ranked = dict(vocab_scores) if use_scores else vocab
    candidates = []
    for token, score in ranked.items():
        splits = [
            (token[:cut], token[cut:], score)
            for cut in range(1, len(token))
            if token[:cut] in vocab and token[cut:] in vocab
        ]
        splits.sort(key=lambda entry: (vocab[entry[0]], vocab[entry[1]]))
        candidates.extend(splits)
    candidates.sort(key=lambda entry: (entry[2], len(entry[0]), len(entry[1])), reverse=use_scores)
    return [(left, right) for left, right, _ in candidates]
|
PreTrainedTokenizerBase
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeVarDefaultClass2.py
|
{
"start": 2380,
"end": 2849
}
|
class ____(Generic[P1, P2, P3]): ...
pa1 = ClassPA()
reveal_type(pa1, expected_text="ClassPA[..., ..., ...]")
pa2 = ClassPA[[str]]()
reveal_type(pa2, expected_text="ClassPA[(str), (str), (str)]")
pa3 = ClassPA[..., [float]]()
reveal_type(pa3, expected_text="ClassPA[..., (float), (float)]")
pa4 = ClassPA[..., [int, int], [float]]()
reveal_type(pa4, expected_text="ClassPA[..., (int, int), (float)]")
# This should generate an error because P1 depends on P2.
|
ClassPA
|
python
|
numba__numba
|
numba/tests/test_withlifting.py
|
{
"start": 34119,
"end": 35262
}
|
class ____(TestCase):
# Tests for miscellaneous objmode issues. Run serially.
_numba_parallel_test_ = False
@linux_only
@TestCase.run_test_in_subprocess
def test_no_fork_in_compilation(self):
# Checks that there is no fork/clone/execve during compilation, see
# issue #7881. This needs running in a subprocess as the offending fork
# call that triggered #7881 occurs on the first call to uuid1 as it's
# part if the initialisation process for that function (gets hardware
# address of machine).
if not strace_supported():
# Needs strace support.
self.skipTest("strace support missing")
def force_compile():
@njit('void()') # force compilation
def f():
with numba.objmode():
pass
# capture these syscalls:
syscalls = ['fork', 'clone', 'execve']
# check that compilation does not trigger fork, clone or execve
strace_data = strace(force_compile, syscalls)
self.assertFalse(strace_data)
if __name__ == '__main__':
unittest.main()
|
TestMisc
|
python
|
readthedocs__readthedocs.org
|
readthedocs/rtd_tests/tests/test_privacy_urls.py
|
{
"start": 18778,
"end": 18989
}
|
class ____(PublicUserProfileMixin, TestCase):
def login(self):
return self.client.login(username="tester", password="test")
def is_admin(self):
return False
|
PublicUserProfileUserAccessTest
|
python
|
sympy__sympy
|
sympy/plotting/series.py
|
{
"start": 49706,
"end": 57667
}
|
class ____(Line2DBaseSeries):
"""Representation for a line consisting of a SymPy expression over a range."""
def __init__(self, expr, var_start_end, label="", **kwargs):
super().__init__(**kwargs)
self.expr = expr if callable(expr) else sympify(expr)
self._label = str(self.expr) if label is None else label
self._latex_label = latex(self.expr) if label is None else label
self.ranges = [var_start_end]
self._cast = complex
# for complex-related data series, this determines what data to return
# on the y-axis
self._return = kwargs.get("return", None)
self._post_init()
if not self._interactive_ranges:
# NOTE: the following check is only possible when the minimum and
# maximum values of a plotting range are numeric
start, end = [complex(t) for t in self.ranges[0][1:]]
if im(start) != im(end):
raise ValueError(
"%s requires the imaginary " % self.__class__.__name__ +
"part of the start and end values of the range "
"to be the same.")
if self.adaptive and self._return:
warnings.warn("The adaptive algorithm is unable to deal with "
"complex numbers. Automatically switching to uniform meshing.")
self.adaptive = False
@property
def nb_of_points(self):
return self.n[0]
@nb_of_points.setter
def nb_of_points(self, v):
self.n = v
def __str__(self):
def f(t):
if isinstance(t, complex):
if t.imag != 0:
return t
return t.real
return t
pre = "interactive " if self.is_interactive else ""
post = ""
if self.is_interactive:
post = " and parameters " + str(tuple(self.params.keys()))
wrapper = _get_wrapper_for_expr(self._return)
return pre + "cartesian line: %s for %s over %s" % (
wrapper % self.expr,
str(self.var),
str((f(self.start), f(self.end))),
) + post
def get_points(self):
"""Return lists of coordinates for plotting. Depending on the
``adaptive`` option, this function will either use an adaptive algorithm
or it will uniformly sample the expression over the provided range.
This function is available for back-compatibility purposes. Consider
using ``get_data()`` instead.
Returns
=======
x : list
List of x-coordinates
y : list
List of y-coordinates
"""
return self._get_data_helper()
def _adaptive_sampling(self):
try:
if callable(self.expr):
f = self.expr
else:
f = lambdify([self.var], self.expr, self.modules)
x, y = self._adaptive_sampling_helper(f)
except Exception as err: # noqa: BLE001
warnings.warn(
"The evaluation with %s failed.\n" % (
"NumPy/SciPy" if not self.modules else self.modules) +
"{}: {}\n".format(type(err).__name__, err) +
"Trying to evaluate the expression with Sympy, but it might "
"be a slow operation."
)
f = lambdify([self.var], self.expr, "sympy")
x, y = self._adaptive_sampling_helper(f)
return x, y
def _adaptive_sampling_helper(self, f):
"""The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
.. [1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
np = import_module('numpy')
x_coords = []
y_coords = []
def sample(p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
# Randomly sample to avoid aliasing.
random = 0.45 + np.random.rand() * 0.1
if self.xscale == 'log':
xnew = 10**(np.log10(p[0]) + random * (np.log10(q[0]) -
np.log10(p[0])))
else:
xnew = p[0] + random * (q[0] - p[0])
ynew = _adaptive_eval(f, xnew)
new_point = np.array([xnew, ynew])
# Maximum depth
if depth > self.depth:
x_coords.append(q[0])
y_coords.append(q[1])
# Sample to depth of 6 (whether the line is flat or not)
# without using linspace (to avoid aliasing).
elif depth < 6:
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
# Sample ten points if complex values are encountered
# at both ends. If there is a real value in between, then
# sample those points further.
elif p[1] is None and q[1] is None:
if self.xscale == 'log':
xarray = np.logspace(p[0], q[0], 10)
else:
xarray = np.linspace(p[0], q[0], 10)
yarray = list(map(f, xarray))
if not all(y is None for y in yarray):
for i in range(len(yarray) - 1):
if not (yarray[i] is None and yarray[i + 1] is None):
sample([xarray[i], yarray[i]],
[xarray[i + 1], yarray[i + 1]], depth + 1)
# Sample further if one of the end points in None (i.e. a
# complex value) or the three points are not almost collinear.
elif (p[1] is None or q[1] is None or new_point[1] is None
or not flat(p, new_point, q)):
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
else:
x_coords.append(q[0])
y_coords.append(q[1])
f_start = _adaptive_eval(f, self.start.real)
f_end = _adaptive_eval(f, self.end.real)
x_coords.append(self.start.real)
y_coords.append(f_start)
sample(np.array([self.start.real, f_start]),
np.array([self.end.real, f_end]), 0)
return (x_coords, y_coords)
def _uniform_sampling(self):
np = import_module('numpy')
x, result = self._evaluate()
_re, _im = np.real(result), np.imag(result)
_re = self._correct_shape(_re, x)
_im = self._correct_shape(_im, x)
return x, _re, _im
def _get_data_helper(self):
"""Returns coordinates that needs to be postprocessed.
"""
np = import_module('numpy')
if self.adaptive and (not self.only_integers):
x, y = self._adaptive_sampling()
return [np.array(t) for t in [x, y]]
x, _re, _im = self._uniform_sampling()
if self._return is None:
# The evaluation could produce complex numbers. Set real elements
# to NaN where there are non-zero imaginary elements
_re[np.invert(np.isclose(_im, np.zeros_like(_im)))] = np.nan
elif self._return == "real":
pass
elif self._return == "imag":
_re = _im
elif self._return == "abs":
_re = np.sqrt(_re**2 + _im**2)
elif self._return == "arg":
_re = np.arctan2(_im, _re)
else:
raise ValueError("`_return` not recognized. "
"Received: %s" % self._return)
return x, _re
|
LineOver1DRangeSeries
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-dask/tests/test_client.py
|
{
"start": 449,
"end": 2267
}
|
class ____:
async def test_with_task(self):
flow_run_id = None
@task
def test_task():
return 42
@flow
def test_flow():
nonlocal flow_run_id
flow_run_id = FlowRunContext.get().flow_run.id
with PrefectDaskClient() as client:
future = client.submit(test_task)
return future.result()
assert test_flow() == 42
prefect_client = get_client()
assert flow_run_id is not None
task_runs = await prefect_client.read_task_runs(
flow_run_filter=FlowRunFilter(id=FlowRunFilterId(any_=[flow_run_id]))
)
assert len(task_runs) == 1
async def test_with_function(self):
def func():
return 42
with PrefectDaskClient() as client:
future = client.submit(func)
assert future.result() == 42
async def test_tracks_dependencies(self):
flow_run_id = None
@task
def test_task(x):
return x
@flow
def test_flow():
nonlocal flow_run_id
flow_run_id = FlowRunContext.get().flow_run.id
with PrefectDaskClient() as client:
future1 = client.submit(test_task, 42)
future2 = client.submit(test_task, future1)
return future2.result()
assert test_flow() == 42
prefect_client = get_client()
assert flow_run_id is not None
task_runs = await prefect_client.read_task_runs(
flow_run_filter=FlowRunFilter(id=FlowRunFilterId(any_=[flow_run_id])),
sort=TaskRunSort.END_TIME_DESC,
)
assert len(task_runs) == 2
assert task_runs[0].task_inputs == {"x": [TaskRunResult(id=task_runs[1].id)]}
|
TestSubmit
|
python
|
sphinx-doc__sphinx
|
sphinx/search/ru.py
|
{
"start": 193,
"end": 596
}
|
class ____(SearchLanguage):
lang = 'ru'
language_name = 'Russian'
js_stemmer_rawcode = 'russian-stemmer.js'
stopwords = RUSSIAN_STOPWORDS
def __init__(self, options: dict[str, str]) -> None:
super().__init__(options)
self.stemmer = snowballstemmer.stemmer('russian')
def stem(self, word: str) -> str:
return self.stemmer.stemWord(word.lower())
|
SearchRussian
|
python
|
pyca__cryptography
|
src/cryptography/hazmat/primitives/asymmetric/ec.py
|
{
"start": 7902,
"end": 8057
}
|
class ____(EllipticCurve):
name = "sect233k1"
key_size = 232
group_order = 0x8000000000000000000000000000069D5BB915BCD46EFB1AD5F173ABDF
|
SECT233K1
|
python
|
kamyu104__LeetCode-Solutions
|
Python/smallest-string-with-swaps.py
|
{
"start": 1227,
"end": 2205
}
|
class ____(object):
def smallestStringWithSwaps(self, s, pairs):
"""
:type s: str
:type pairs: List[List[int]]
:rtype: str
"""
def dfs(i, adj, lookup, component):
lookup.add(i)
component.append(i)
for j in adj[i]:
if j in lookup:
continue
dfs(j, adj, lookup, component)
adj = collections.defaultdict(list)
for i, j in pairs:
adj[i].append(j)
adj[j].append(i)
lookup = set()
result = list(s)
for i in xrange(len(s)):
if i in lookup:
continue
component = []
dfs(i, adj, lookup, component)
component.sort()
chars = sorted(result[k] for k in component)
for comp, char in itertools.izip(component, chars):
result[comp] = char
return "".join(result)
|
Solution2
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/errors.py
|
{
"start": 15209,
"end": 16035
}
|
class ____(DagsterError, AttributeError):
# inherits from AttributeError as it is raised within a __getattr__ call... used to support
# object hasattr method
"""Indicates that an unknown resource was accessed in the body of an execution step. May often
happen by accessing a resource in the compute function of an op without first supplying the
op with the correct `required_resource_keys` argument.
"""
def __init__(self, resource_name, *args, **kwargs):
self.resource_name = check.str_param(resource_name, "resource_name")
msg = (
f"Unknown resource `{resource_name}`. Specify `{resource_name}` as a required resource "
"on the compute / config function that accessed it."
)
super().__init__(msg, *args, **kwargs)
|
DagsterUnknownResourceError
|
python
|
PrefectHQ__prefect
|
src/prefect/client/schemas/objects.py
|
{
"start": 43353,
"end": 43584
}
|
class ____(ObjectBaseModel):
"""An ORM representation of account info."""
key: str = Field(default=..., description="Account info key")
value: dict[str, Any] = Field(default=..., description="Account info")
|
Configuration
|
python
|
pytorch__pytorch
|
torch/testing/_internal/common_fsdp.py
|
{
"start": 32211,
"end": 34032
}
|
class ____(nn.Sequential):
def __init__(self, mlp_dim: int, *, with_seq_parallel: bool = False):
modules: list[nn.Module] = [
# Use multiplier of 3 to exercise uneven case
MLP(mlp_dim, dim_multiplier=3),
MLP(mlp_dim),
MLP(mlp_dim, dim_multiplier=3),
]
if with_seq_parallel:
modules.append(nn.LayerNorm(mlp_dim, bias=False))
super().__init__(*modules)
self.with_seq_parallel = with_seq_parallel
def parallelize(
self,
tp_mesh: DeviceMesh,
dp_mesh: DeviceMesh,
use_activation_checkpointing: bool,
**fsdp_kwargs,
) -> "MLPStack":
parallelize_plan = {
# Pass `use_local_output=False` to keep as DTensor to preserve
# uneven activation dims
"0.in_proj": ColwiseParallel(use_local_output=False),
"0.out_proj": RowwiseParallel(use_local_output=False),
"1.in_proj": ColwiseParallel(use_local_output=False),
"1.out_proj": RowwiseParallel(use_local_output=False),
"2.in_proj": ColwiseParallel(use_local_output=False),
"2.out_proj": RowwiseParallel(output_layouts=Shard(1))
if self.with_seq_parallel
else RowwiseParallel(),
}
if self.with_seq_parallel:
parallelize_plan["3"] = SequenceParallel(sequence_dim=1)
parallelize_module(self, device_mesh=tp_mesh, parallelize_plan=parallelize_plan)
for module in self:
if isinstance(module, nn.LayerNorm):
continue
if use_activation_checkpointing:
checkpoint(module)
fully_shard(module, mesh=dp_mesh, **fsdp_kwargs)
fully_shard(self, mesh=dp_mesh, **fsdp_kwargs)
return self
|
MLPStack
|
python
|
zarr-developers__zarr-python
|
src/zarr/core/buffer/core.py
|
{
"start": 609,
"end": 986
}
|
class ____(Protocol):
"""Protocol for the array-like type that underlie Buffer"""
@property
def dtype(self) -> np.dtype[Any]: ...
@property
def ndim(self) -> int: ...
@property
def size(self) -> int: ...
def __getitem__(self, key: slice) -> Self: ...
def __setitem__(self, key: slice, value: Any) -> None: ...
@runtime_checkable
|
ArrayLike
|
python
|
PrefectHQ__prefect
|
src/prefect/server/orchestration/core_policy.py
|
{
"start": 3547,
"end": 4792
}
|
class ____(TaskRunOrchestrationPolicy):
"""
Orchestration rules that run against task-run-state transitions in priority order.
"""
@staticmethod
def priority() -> list[
Union[
type[BaseUniversalTransform[orm_models.TaskRun, core.TaskRunPolicy]],
type[BaseOrchestrationRule[orm_models.TaskRun, core.TaskRunPolicy]],
]
]:
return cast(
list[
Union[
type[
BaseUniversalTransform[orm_models.TaskRun, core.TaskRunPolicy]
],
type[BaseOrchestrationRule[orm_models.TaskRun, core.TaskRunPolicy]],
]
],
[
CacheRetrieval,
HandleTaskTerminalStateTransitions,
PreventRunningTasksFromStoppedFlows,
SecureTaskConcurrencySlots, # retrieve cached states even if slots are full
CopyScheduledTime,
WaitForScheduledTime,
RetryFailedTasks,
RenameReruns,
UpdateFlowRunTrackerOnTasks,
CacheInsertion,
ReleaseTaskConcurrencySlots,
],
)
|
CoreTaskPolicy
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_operators.py
|
{
"start": 33252,
"end": 37591
}
|
class ____(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = "default"
def test_contains(self):
class MyType(UserDefinedType):
cache_ok = True
class comparator_factory(UserDefinedType.Comparator):
def contains(self, other, **kw):
return self.op("->")(other)
self.assert_compile(Column("x", MyType()).contains(5), "x -> :x_1")
def test_getitem(self):
class MyType(UserDefinedType):
cache_ok = True
class comparator_factory(UserDefinedType.Comparator):
def __getitem__(self, index):
return self.op("->")(index)
self.assert_compile(Column("x", MyType())[5], "x -> :x_1")
def test_op_not_an_iterator(self):
# see [ticket:2726]
class MyType(UserDefinedType):
class comparator_factory(UserDefinedType.Comparator):
def __getitem__(self, index):
return self.op("->")(index)
col = Column("x", MyType())
assert not isinstance(col, collections_abc.Iterable)
@testing.combinations(
(operators.lshift, OperatorClass.BITWISE),
(operators.rshift, OperatorClass.BITWISE),
(operators.matmul, OperatorClass.MATH),
(operators.getitem, OperatorClass.INDEXABLE),
)
def test_not_implemented_operators(self, op, operator_class):
"""test operators that are availble but not implemented by default.
this might be semantically different from the operator not being
present in the operator class though the effect is the same (that is,
we could just not include lshift/rshift/matmul in any operator class,
do away with _unsupported_impl() and the path to implement them would
be the same). So it's not totally clear if we should keep using
_unsupported_impl() long term. However at least for now because we
only emit a deprecation warning in the other case, this is still
appropriately a separate concept.
"""
class MyType(TypeEngine):
operator_classes = operator_class
with expect_raises_message(
NotImplementedError,
f"Operator {op.__name__!r} is not supported on this expression",
):
op(column("q", MyType()), "test")
def test_lshift(self):
class MyType(UserDefinedType):
cache_ok = True
class comparator_factory(UserDefinedType.Comparator):
def __lshift__(self, other):
return self.op("->")(other)
self.assert_compile(Column("x", MyType()) << 5, "x -> :x_1")
def test_rlshift(self):
class MyType(UserDefinedType):
cache_ok = True
class comparator_factory(UserDefinedType.Comparator):
def __rlshift__(self, other):
return self.op("->")(other)
self.assert_compile(5 << Column("x", MyType()), "x -> :x_1")
def test_rshift(self):
class MyType(UserDefinedType):
cache_ok = True
class comparator_factory(UserDefinedType.Comparator):
def __rshift__(self, other):
return self.op("->")(other)
self.assert_compile(Column("x", MyType()) >> 5, "x -> :x_1")
def test_rrshift(self):
class MyType(UserDefinedType):
cache_ok = True
class comparator_factory(UserDefinedType.Comparator):
def __rrshift__(self, other):
return self.op("->")(other)
self.assert_compile(5 >> Column("x", MyType()), "x -> :x_1")
def test_matmul(self):
class MyType(UserDefinedType):
cache_ok = True
class comparator_factory(UserDefinedType.Comparator):
def __matmul__(self, other):
return self.op("->")(other)
self.assert_compile(Column("x", MyType()) @ 5, "x -> :x_1")
def test_rmatmul(self):
class MyType(UserDefinedType):
cache_ok = True
class comparator_factory(UserDefinedType.Comparator):
def __rmatmul__(self, other):
return self.op("->")(other)
self.assert_compile(5 @ Column("x", MyType()), "x -> :x_1")
|
ExtensionOperatorTest
|
python
|
scipy__scipy
|
scipy/stats/_qmc.py
|
{
"start": 70966,
"end": 84703
}
|
class ____(QMCEngine):
"""Poisson disk sampling.
Parameters
----------
d : int
Dimension of the parameter space.
radius : float
Minimal distance to keep between points when sampling new candidates.
hypersphere : {"volume", "surface"}, optional
Sampling strategy to generate potential candidates to be added in the
final sample. Default is "volume".
* ``volume``: original Bridson algorithm as described in [1]_.
New candidates are sampled *within* the hypersphere.
* ``surface``: only sample the surface of the hypersphere.
ncandidates : int
Number of candidates to sample per iteration. More candidates result
in a denser sampling as more candidates can be accepted per iteration.
optimization : {None, "random-cd", "lloyd"}, optional
Whether to use an optimization scheme to improve the quality after
sampling. Note that this is a post-processing step that does not
guarantee that all properties of the sample will be conserved.
Default is None.
* ``random-cd``: random permutations of coordinates to lower the
centered discrepancy. The best sample based on the centered
discrepancy is constantly updated. Centered discrepancy-based
sampling shows better space-filling robustness toward 2D and 3D
subprojections compared to using other discrepancy measures.
* ``lloyd``: Perturb samples using a modified Lloyd-Max algorithm.
The process converges to equally spaced samples.
.. versionadded:: 1.10.0
rng : `numpy.random.Generator`, optional
Pseudorandom number generator state. When `rng` is None, a new
`numpy.random.Generator` is created using entropy from the
operating system. Types other than `numpy.random.Generator` are
passed to `numpy.random.default_rng` to instantiate a ``Generator``.
.. versionchanged:: 1.15.0
As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
transition from use of `numpy.random.RandomState` to
`numpy.random.Generator`, this keyword was changed from `seed` to
`rng`. For an interim period, both keywords will continue to work, although
only one may be specified at a time. After the interim period, function
calls using the `seed` keyword will emit warnings. Following a
deprecation period, the `seed` keyword will be removed.
l_bounds, u_bounds : array_like (d,)
Lower and upper bounds of target sample data.
Notes
-----
Poisson disk sampling is an iterative sampling strategy. Starting from
a seed sample, `ncandidates` are sampled in the hypersphere
surrounding the seed. Candidates below a certain `radius` or outside the
domain are rejected. New samples are added in a pool of sample seed. The
process stops when the pool is empty or when the number of required
samples is reached.
The maximum number of point that a sample can contain is directly linked
to the `radius`. As the dimension of the space increases, a higher radius
spreads the points further and help overcome the curse of dimensionality.
See the :ref:`quasi monte carlo tutorial <quasi-monte-carlo>` for more
details.
.. warning::
The algorithm is more suitable for low dimensions and sampling size
due to its iterative nature and memory requirements.
Selecting a small radius with a high dimension would
mean that the space could contain more samples than using lower
dimension or a bigger radius.
Some code taken from [2]_, written consent given on 31.03.2021
by the original author, Shamis, for free use in SciPy under
the 3-clause BSD.
References
----------
.. [1] Robert Bridson, "Fast Poisson Disk Sampling in Arbitrary
Dimensions." SIGGRAPH, 2007.
.. [2] `StackOverflow <https://stackoverflow.com/questions/66047540>`__.
Examples
--------
Generate a 2D sample using a `radius` of 0.2.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from matplotlib.collections import PatchCollection
>>> from scipy.stats import qmc
>>>
>>> rng = np.random.default_rng()
>>> radius = 0.2
>>> engine = qmc.PoissonDisk(d=2, radius=radius, rng=rng)
>>> sample = engine.random(20)
Visualizing the 2D sample and showing that no points are closer than
`radius`. ``radius/2`` is used to visualize non-intersecting circles.
If two samples are exactly at `radius` from each other, then their circle
of radius ``radius/2`` will touch.
>>> fig, ax = plt.subplots()
>>> _ = ax.scatter(sample[:, 0], sample[:, 1])
>>> circles = [plt.Circle((xi, yi), radius=radius/2, fill=False)
... for xi, yi in sample]
>>> collection = PatchCollection(circles, match_original=True)
>>> ax.add_collection(collection)
>>> _ = ax.set(aspect='equal', xlabel=r'$x_1$', ylabel=r'$x_2$',
... xlim=[0, 1], ylim=[0, 1])
>>> plt.show()
Such visualization can be seen as circle packing: how many circle can
we put in the space. It is a np-hard problem. The method `fill_space`
can be used to add samples until no more samples can be added. This is
a hard problem and parameters may need to be adjusted manually. Beware of
the dimension: as the dimensionality increases, the number of samples
required to fill the space increases exponentially
(curse-of-dimensionality).
"""
@_transition_to_rng('seed', replace_doc=False)
def __init__(
self,
d: IntNumber,
*,
radius: DecimalNumber = 0.05,
hypersphere: Literal["volume", "surface"] = "volume",
ncandidates: IntNumber = 30,
optimization: Literal["random-cd", "lloyd"] | None = None,
rng: SeedType = None,
l_bounds: "npt.ArrayLike | None" = None,
u_bounds: "npt.ArrayLike | None" = None,
) -> None:
# Used in `scipy.integrate.qmc_quad`
self._init_quad = {'d': d, 'radius': radius,
'hypersphere': hypersphere,
'ncandidates': ncandidates,
'optimization': optimization}
super()._initialize(d=d, optimization=optimization, rng=rng)
hypersphere_sample = {
"volume": self._hypersphere_volume_sample,
"surface": self._hypersphere_surface_sample
}
try:
self.hypersphere_method = hypersphere_sample[hypersphere]
except KeyError as exc:
message = (
f"{hypersphere!r} is not a valid hypersphere sampling"
f" method. It must be one of {set(hypersphere_sample)!r}")
raise ValueError(message) from exc
# size of the sphere from which the samples are drawn relative to the
# size of a disk (radius)
# for the surface sampler, all new points are almost exactly 1 radius
# away from at least one existing sample +eps to avoid rejection
self.radius_factor = 2 if hypersphere == "volume" else 1.001
self.radius = radius
self.radius_squared = self.radius**2
# sample to generate per iteration in the hypersphere around center
self.ncandidates = ncandidates
if u_bounds is None:
u_bounds = np.ones(d)
if l_bounds is None:
l_bounds = np.zeros(d)
self.l_bounds, self.u_bounds = _validate_bounds(
l_bounds=l_bounds, u_bounds=u_bounds, d=int(d)
)
with np.errstate(divide='ignore'):
self.cell_size = self.radius / np.sqrt(self.d)
self.grid_size = (
np.ceil((self.u_bounds - self.l_bounds) / self.cell_size)
).astype(int)
self._initialize_grid_pool()
def _initialize_grid_pool(self):
"""Sampling pool and sample grid."""
self.sample_pool = []
# Positions of cells
# n-dim value for each grid cell
self.sample_grid = np.empty(
np.append(self.grid_size, self.d),
dtype=np.float32
)
# Initialise empty cells with NaNs
self.sample_grid.fill(np.nan)
def _random(
self, n: IntNumber = 1, *, workers: IntNumber = 1
) -> np.ndarray:
"""Draw `n` in the interval ``[l_bounds, u_bounds]``.
Note that it can return fewer samples if the space is full.
See the note section of the class.
Parameters
----------
n : int, optional
Number of samples to generate in the parameter space. Default is 1.
Returns
-------
sample : array_like (n, d)
QMC sample.
"""
if n == 0 or self.d == 0:
return np.empty((n, self.d))
def in_limits(sample: np.ndarray) -> bool:
for i in range(self.d):
if (sample[i] > self.u_bounds[i] or sample[i] < self.l_bounds[i]):
return False
return True
def in_neighborhood(candidate: np.ndarray, n: int = 2) -> bool:
"""
Check if there are samples closer than ``radius_squared`` to the
`candidate` sample.
"""
indices = ((candidate - self.l_bounds) / self.cell_size).astype(int)
ind_min = np.maximum(indices - n, self.l_bounds.astype(int))
ind_max = np.minimum(indices + n + 1, self.grid_size)
# Check if the center cell is empty
if not np.isnan(self.sample_grid[tuple(indices)][0]):
return True
a = [slice(ind_min[i], ind_max[i]) for i in range(self.d)]
# guards against: invalid value encountered in less as we are
# comparing with nan and returns False. Which is wanted.
with np.errstate(invalid='ignore'):
if np.any(
np.sum(
np.square(candidate - self.sample_grid[tuple(a)]),
axis=self.d
) < self.radius_squared
):
return True
return False
def add_sample(candidate: np.ndarray) -> None:
self.sample_pool.append(candidate)
indices = ((candidate - self.l_bounds) / self.cell_size).astype(int)
self.sample_grid[tuple(indices)] = candidate
curr_sample.append(candidate)
curr_sample: list[np.ndarray] = []
if len(self.sample_pool) == 0:
# the pool is being initialized with a single random sample
add_sample(self.rng.uniform(self.l_bounds, self.u_bounds))
num_drawn = 1
else:
num_drawn = 0
# exhaust sample pool to have up to n sample
while len(self.sample_pool) and num_drawn < n:
# select a sample from the available pool
idx_center = rng_integers(self.rng, len(self.sample_pool))
center = self.sample_pool[idx_center]
del self.sample_pool[idx_center]
# generate candidates around the center sample
candidates = self.hypersphere_method(
center, self.radius * self.radius_factor, self.ncandidates
)
# keep candidates that satisfy some conditions
for candidate in candidates:
if in_limits(candidate) and not in_neighborhood(candidate):
add_sample(candidate)
num_drawn += 1
if num_drawn >= n:
break
self.num_generated += num_drawn
return np.array(curr_sample)
def fill_space(self) -> np.ndarray:
"""Draw ``n`` samples in the interval ``[l_bounds, u_bounds]``.
Unlike `random`, this method will try to add points until
the space is full. Depending on ``candidates`` (and to a lesser extent
other parameters), some empty areas can still be present in the sample.
.. warning::
This can be extremely slow in high dimensions or if the
``radius`` is very small-with respect to the dimensionality.
Returns
-------
sample : array_like (n, d)
QMC sample.
"""
return self.random(np.inf) # type: ignore[arg-type]
def reset(self) -> "PoissonDisk":
"""Reset the engine to base state.
Returns
-------
engine : PoissonDisk
Engine reset to its base state.
"""
super().reset()
self._initialize_grid_pool()
return self
def _hypersphere_volume_sample(
self, center: np.ndarray, radius: DecimalNumber,
candidates: IntNumber = 1
) -> np.ndarray:
"""Uniform sampling within hypersphere."""
# should remove samples within r/2
x = self.rng.standard_normal(size=(candidates, self.d))
ssq = np.sum(x**2, axis=1)
fr = radius * gammainc(self.d/2, ssq/2)**(1/self.d) / np.sqrt(ssq)
fr_tiled = np.tile(
fr.reshape(-1, 1), (1, self.d) # type: ignore[arg-type]
)
p = center + np.multiply(x, fr_tiled)
return p
def _hypersphere_surface_sample(
self, center: np.ndarray, radius: DecimalNumber,
candidates: IntNumber = 1
) -> np.ndarray:
"""Uniform sampling on the hypersphere's surface."""
vec = self.rng.standard_normal(size=(candidates, self.d))
vec /= np.linalg.norm(vec, axis=1)[:, None]
p = center + np.multiply(vec, radius)
return p
|
PoissonDisk
|
python
|
scrapy__scrapy
|
tests/test_spidermiddleware.py
|
{
"start": 13706,
"end": 13822
}
|
class ____:
async def process_spider_output_async(self, response, result):
yield
|
UniversalMiddlewareNoSync
|
python
|
getsentry__sentry
|
tests/sentry/users/api/endpoints/test_user_role_details.py
|
{
"start": 170,
"end": 1187
}
|
class ____(APITestCase):
endpoint = "sentry-api-0-user-userrole-details"
def setUp(self) -> None:
super().setUp()
self.user = self.create_user(is_superuser=True)
self.login_as(user=self.user, superuser=True)
self.add_user_permission(self.user, "users.admin")
def test_fails_without_superuser(self) -> None:
self.user = self.create_user(is_superuser=False)
self.login_as(self.user)
UserRole.objects.create(name="test-role")
resp = self.get_response("me", "test-role")
assert resp.status_code == 403
self.user.update(is_superuser=True)
resp = self.get_response("me", "test-role")
assert resp.status_code == 403
def test_fails_without_users_admin_permission(self) -> None:
self.user = self.create_user(is_superuser=True)
self.login_as(self.user, superuser=True)
resp = self.get_response("me", "test-role")
assert resp.status_code == 403
@control_silo_test
|
UserUserRolesTest
|
python
|
huggingface__transformers
|
src/transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py
|
{
"start": 108811,
"end": 118854
}
|
class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)):
Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a
bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
scale-invariant IoU loss.
loss_dict (`Dict`, *optional*):
A dictionary containing the individual losses. Useful for logging.
logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
Classification logits (including no-object) for all queries.
pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
possible padding). You can use [`~MMGroundingDinoProcessor.post_process_grounded_object_detection`] to retrieve the
unnormalized bounding boxes.
auxiliary_outputs (`list[Dict]`, *optional*):
Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
`pred_boxes`) for each decoder layer.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Initial reference points sent through the Transformer decoder.
intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
Stacked intermediate hidden states (output of each layer of the decoder).
intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
Stacked intermediate reference points (reference points of each layer of the decoder).
encoder_last_hidden_state_vision (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_last_hidden_state_text (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_vision_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the vision embeddings + one for the output of each
layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the vision encoder at the
output of each layer plus the initial embedding outputs.
encoder_text_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the text embeddings + one for the output of each layer)
of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the text encoder at the output of
each layer plus the initial embedding outputs.
enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.two_stage=True`):
Predicted bounding boxes scores where the top `config.num_queries` scoring bounding boxes are picked as
region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and
background).
enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.two_stage=True`):
Logits of predicted bounding boxes coordinates in the first stage.
encoder_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.two_stage=True`):
Logits of top `config.num_queries` scoring bounding boxes in the first stage.
encoder_pred_boxes (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.two_stage=True`):
Coordinates of top `config.num_queries` scoring bounding boxes in the first stage.
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Encoded candidate labels sequence. Used in processor to post process object detection result.
"""
loss: Optional[torch.FloatTensor] = None
loss_dict: Optional[dict] = None
logits: Optional[torch.FloatTensor] = None
pred_boxes: Optional[torch.FloatTensor] = None
auxiliary_outputs: Optional[list[dict]] = None
last_hidden_state: Optional[torch.FloatTensor] = None
init_reference_points: Optional[torch.FloatTensor] = None
intermediate_hidden_states: Optional[torch.FloatTensor] = None
intermediate_reference_points: Optional[torch.FloatTensor] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None
encoder_last_hidden_state_vision: Optional[torch.FloatTensor] = None
encoder_last_hidden_state_text: Optional[torch.FloatTensor] = None
encoder_vision_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_text_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[tuple[torch.FloatTensor]]] = None
enc_outputs_class: Optional[torch.FloatTensor] = None
enc_outputs_coord_logits: Optional[torch.FloatTensor] = None
encoder_logits: Optional[torch.FloatTensor] = None
encoder_pred_boxes: Optional[torch.FloatTensor] = None
input_ids: Optional[torch.LongTensor] = None
def build_label_maps(logits: torch.FloatTensor, input_ids: torch.LongTensor) -> tuple[torch.FloatTensor]:
"""
Computes a mapping between tokens and their corresponding labels, where `num_labels` is determined by the number of classes in the input prompt.
The function identifies segments of tokens between specific delimiter tokens and generates label maps for those segments.
Args:
logits (`torch.Tensor` of shape `(batch_size, seq_length, hidden_size)`):
The output logits from the model, where `hidden_size` corresponds to the dimension of the model's output features.
input_ids (`torch.Tensor` of shape `(batch_size, seq_length)`):
The input token IDs corresponding to the input prompt. For example, given the prompt "fish. shark.",
`input_ids` might look like `[101, 3869, 1012, 11420, 1012, 102]` where each number corresponds to a token including special tokens.
Returns:
tuple: A tuple containing label maps for each instance in the batch.
- label_maps (tuple of `torch.Tensor`):
A tuple of tensors, where each tensor in the tuple corresponds to an instance in the batch. Each tensor
has shape `(num_labels, hidden_size)` and contains binary values (0 or 1), where `1` indicates the tokens
that are associated with a specific label (class) between delimiter tokens, and `0` elsewhere.
Example:
Given an input prompt "fish. shark." and corresponding `input_ids` as `[101, 3869, 1012, 11420, 1012, 102]`:
- The function identifies the tokens for "fish" (IDs `[3869]`) and "shark" (IDs `[11420]`).
- The function then constructs label maps for these tokens, where each label map indicates which tokens
correspond to which label between the delimiter tokens (e.g., between the period `.`).
- The output is a tuple of label maps, one for each instance in the batch.
Note:
- `SPECIAL_TOKENS` should be a predefined list of tokens that are considered special (e.g., `[CLS]`, `[SEP]`, etc.).
"""
max_seq_len = logits.shape[-1]
# Add [PAD] token to the list of special tokens
delimiter_tokens = torch.tensor(SPECIAL_TOKENS + [0], device=input_ids.device)
delimiter_token_masks = torch.isin(input_ids, delimiter_tokens)
label_groups = torch.cumsum(delimiter_token_masks, dim=1) * (~delimiter_token_masks).to(torch.int32)
label_maps = ()
# Iterate over batch dimension as we can have different number of labels
for label_group in label_groups:
# `label_group` is a tensor of shape `(seq_len,)` with zeros for non-label tokens and integers for label tokens
# label tokens with same integer value are part of the same label group
# Get unique labels and exclude 0 (i.e. non-label tokens)
unique_labels = torch.unique(label_group)[1:, None]
num_labels = unique_labels.shape[0]
# Create one-hot encoding for each label group
label_map = label_group.unsqueeze(0).repeat(num_labels, 1)
label_map = torch.where(label_map == unique_labels, 1, 0)
# Pad label_map to match `max_seq_len`
label_map = F.pad(label_map, (0, max_seq_len - label_map.shape[1]), value=0)
label_maps += (label_map,)
return label_maps
def build_text_mask(logits, attention_mask):
"""
Create text_mask based on the matching indices
"""
seq_len = attention_mask.shape[1]
text_mask = torch.zeros_like(logits, device=logits.device, dtype=attention_mask.dtype)
text_mask[:, :, :seq_len] = attention_mask[:, None, :]
return text_mask.bool()
@auto_docstring(
custom_intro="""
Grounding DINO Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on top,
for tasks such as COCO detection.
"""
)
|
MMGroundingDinoObjectDetectionOutput
|
python
|
huggingface__transformers
|
src/transformers/models/florence2/modular_florence2.py
|
{
"start": 70238,
"end": 77608
}
|
class ____(LlavaForConditionalGeneration):
_checkpoint_conversion_mapping = {}
_tied_weights_keys = {
"lm_head.weight": "model.language_model.shared.weight",
}
def get_image_features(self, pixel_values: torch.Tensor, **kwargs):
return self.model.get_image_features(pixel_values=pixel_values, **kwargs)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[list[torch.FloatTensor]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Florence2Seq2SeqLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, Florence2ForConditionalGeneration
>>> model = Florence2ForConditionalGeneration.from_pretrained("florence-community/Florence-2-large")
>>> processor = AutoProcessor.from_pretrained("florence-community/Florence-2-large")
>>> prompt = "<CAPTION>"
>>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(text=prompt, images=image, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(**inputs, max_length=100)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"A green car parked in front of a yellow building."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if use_cache:
logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.text_config.pad_token_id, self.config.text_config.decoder_start_token_id
)
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
# **kwargs, ## TODO: add back when Bart attention is refactored and takes kwargs
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
)
return Florence2Seq2SeqLMOutput(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
image_hidden_states=outputs.image_hidden_states,
)
def get_placeholder_mask(
self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor
):
return self.model.get_placeholder_mask(
input_ids=input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
def _prepare_encoder_decoder_kwargs_for_generation(
self,
inputs_tensor: torch.Tensor,
model_kwargs,
model_input_name: Optional[str],
generation_config,
) -> dict[str, Any]:
# override to handle merging image and text embeddings before passing to language encoder
inputs_embeds = model_kwargs.pop("inputs_embeds", None)
pixel_values = model_kwargs.pop("pixel_values", None)
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(inputs_tensor)
if pixel_values is not None:
image_features = self.get_image_features(pixel_values)
image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(
inputs_tensor, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
model_kwargs["inputs_embeds"] = inputs_embeds
model_kwargs = super()._prepare_encoder_decoder_kwargs_for_generation(
None, model_kwargs, model_input_name, generation_config
)
model_kwargs.pop("inputs_embeds", None)
return model_kwargs
__all__ = [
"Florence2Config",
"Florence2Processor",
"Florence2VisionConfig",
"Florence2Model",
"Florence2ForConditionalGeneration",
"Florence2PreTrainedModel",
"Florence2VisionBackbone",
"Florence2VisionPreTrainedModel",
]
|
Florence2ForConditionalGeneration
|
python
|
doocs__leetcode
|
solution/1100-1199/1117.Building H2O/Solution.py
|
{
"start": 34,
"end": 605
}
|
class ____:
def __init__(self):
self.h = Semaphore(2)
self.o = Semaphore(0)
def hydrogen(self, releaseHydrogen: "Callable[[], None]") -> None:
self.h.acquire()
# releaseHydrogen() outputs "H". Do not change or remove this line.
releaseHydrogen()
if self.h._value == 0:
self.o.release()
def oxygen(self, releaseOxygen: "Callable[[], None]") -> None:
self.o.acquire()
# releaseOxygen() outputs "O". Do not change or remove this line.
releaseOxygen()
self.h.release(2)
|
H2O
|
python
|
kamyu104__LeetCode-Solutions
|
Python/circle-and-rectangle-overlapping.py
|
{
"start": 602,
"end": 1162
}
|
class ____(object):
def checkOverlap(self, radius, x_center, y_center, x1, y1, x2, y2):
"""
:type radius: int
:type x_center: int
:type y_center: int
:type x1: int
:type y1: int
:type x2: int
:type y2: int
:rtype: bool
"""
x1 -= x_center
y1 -= y_center
x2 -= x_center
y2 -= y_center
x = min(abs(x1), abs(x2)) if x1*x2 > 0 else 0
y = min(abs(y1), abs(y2)) if y1*y2 > 0 else 0
return x**2 + y**2 <= radius**2
|
Solution2
|
python
|
huggingface__transformers
|
src/transformers/models/vitpose_backbone/modeling_vitpose_backbone.py
|
{
"start": 11141,
"end": 12845
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config: VitPoseBackboneConfig):
super().__init__()
self.num_experts = config.num_experts
self.attention = VitPoseBackboneAttention(config)
self.mlp = VitPoseBackboneMLP(config) if self.num_experts == 1 else VitPoseBackboneMoeMLP(config)
self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
dataset_index: Optional[torch.Tensor] = None,
) -> torch.Tensor:
# Validate dataset_index when using multiple experts
if self.num_experts > 1 and dataset_index is None:
raise ValueError(
"dataset_index must be provided when using multiple experts "
f"(num_experts={self.num_experts}). Please provide dataset_index "
"to the forward pass."
)
hidden_states_norm = self.layernorm_before(hidden_states)
attention_output = self.attention(hidden_states_norm)
# first residual connection
hidden_states = attention_output + hidden_states
layer_output = self.layernorm_after(hidden_states)
if self.num_experts == 1:
layer_output = self.mlp(layer_output)
else:
layer_output = self.mlp(layer_output, indices=dataset_index)
# second residual connection
layer_output = layer_output + hidden_states
return layer_output
# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->VitPoseBackbone
|
VitPoseBackboneLayer
|
python
|
doocs__leetcode
|
solution/1700-1799/1714.Sum Of Special Evenly-Spaced Elements In Array/Solution.py
|
{
"start": 0,
"end": 553
}
|
class ____:
def solve(self, nums: List[int], queries: List[List[int]]) -> List[int]:
mod = 10**9 + 7
n = len(nums)
m = int(sqrt(n))
suf = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(1, m + 1):
for j in range(n - 1, -1, -1):
suf[i][j] = suf[i][min(n, j + i)] + nums[j]
ans = []
for x, y in queries:
if y <= m:
ans.append(suf[y][x] % mod)
else:
ans.append(sum(nums[x::y]) % mod)
return ans
|
Solution
|
python
|
aio-libs__aiohttp
|
aiohttp/connector.py
|
{
"start": 2464,
"end": 5163
}
|
class ____:
"""Represents a single connection."""
__slots__ = (
"_key",
"_connector",
"_loop",
"_protocol",
"_callbacks",
"_source_traceback",
)
def __init__(
self,
connector: "BaseConnector",
key: "ConnectionKey",
protocol: ResponseHandler,
loop: asyncio.AbstractEventLoop,
) -> None:
self._key = key
self._connector = connector
self._loop = loop
self._protocol: ResponseHandler | None = protocol
self._callbacks: list[Callable[[], None]] = []
self._source_traceback = (
traceback.extract_stack(sys._getframe(1)) if loop.get_debug() else None
)
def __repr__(self) -> str:
return f"Connection<{self._key}>"
def __del__(self, _warnings: Any = warnings) -> None:
if self._protocol is not None:
_warnings.warn(
f"Unclosed connection {self!r}", ResourceWarning, source=self
)
if self._loop.is_closed():
return
self._connector._release(self._key, self._protocol, should_close=True)
context = {"client_connection": self, "message": "Unclosed connection"}
if self._source_traceback is not None:
context["source_traceback"] = self._source_traceback
self._loop.call_exception_handler(context)
def __bool__(self) -> Literal[True]:
"""Force subclasses to not be falsy, to make checks simpler."""
return True
@property
def transport(self) -> asyncio.Transport | None:
if self._protocol is None:
return None
return self._protocol.transport
@property
def protocol(self) -> ResponseHandler | None:
return self._protocol
def add_callback(self, callback: Callable[[], None]) -> None:
if callback is not None:
self._callbacks.append(callback)
def _notify_release(self) -> None:
callbacks, self._callbacks = self._callbacks[:], []
for cb in callbacks:
with suppress(Exception):
cb()
def close(self) -> None:
self._notify_release()
if self._protocol is not None:
self._connector._release(self._key, self._protocol, should_close=True)
self._protocol = None
def release(self) -> None:
self._notify_release()
if self._protocol is not None:
self._connector._release(self._key, self._protocol)
self._protocol = None
@property
def closed(self) -> bool:
return self._protocol is None or not self._protocol.is_connected()
|
Connection
|
python
|
scipy__scipy
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_B.py
|
{
"start": 8649,
"end": 9922
}
|
class ____(Benchmark):
r"""
Bohachevsky 1 objective function.
The Bohachevsky 1 [1]_ global optimization problem is a multimodal
minimization problem defined as follows
.. math::
f_{\text{Bohachevsky}}(x) = \sum_{i=1}^{n-1}\left[x_i^2 + 2 x_{i+1}^2 -
0.3 \cos(3 \pi x_i) - 0.4 \cos(4 \pi x_{i + 1}) + 0.7 \right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[-15, 15]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for :math:`i = 1,
..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO: equation needs to be fixed up in the docstring. see Jamil#17
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return (x[0] ** 2 + 2 * x[1] ** 2 - 0.3 * cos(3 * pi * x[0])
- 0.4 * cos(4 * pi * x[1]) + 0.7)
|
Bohachevsky1
|
python
|
sympy__sympy
|
sympy/codegen/fnodes.py
|
{
"start": 18679,
"end": 18756
}
|
class ____(F95Function):
""" Fortran merge function """
nargs = 3
|
merge
|
python
|
getsentry__sentry
|
tests/sentry/apidocs/test_extensions.py
|
{
"start": 594,
"end": 717
}
|
class ____(TypedDict, total=False):
a: int
@extend_schema_serializer(exclude_fields=["excluded"])
|
BasicSerializerOptional
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-aws/dagster_aws/ecs/tasks.py
|
{
"start": 10947,
"end": 15341
}
|
class ____(Exception):
pass
def get_task_definition_dict_from_current_task(
ecs,
family,
current_task,
image: str,
container_name,
environment,
command=None,
secrets=None,
include_sidecars=False,
task_role_arn=None,
execution_role_arn=None,
runtime_platform=None,
cpu=None,
memory=None,
ephemeral_storage=None,
mount_points=None,
volumes=None,
additional_sidecars=None,
repository_credentials=None,
):
current_container_name = current_ecs_container_name()
current_task_definition_arn = current_task["taskDefinitionArn"]
current_task_definition_dict = ecs.describe_task_definition(
taskDefinition=current_task_definition_arn
)["taskDefinition"]
current_container_definition = next(
iter(
[
container
for container in current_task_definition_dict["containerDefinitions"]
if container["name"] == current_container_name
]
)
)
# Don't automatically include health check - may be specific to the current task
current_container_definition = {
key: val for key, val in current_container_definition.items() if key != "healthCheck"
}
# Start with the current process's task's definition but remove
# extra keys that aren't useful for creating a new task definition
# (status, revision, etc.)
expected_keys = [
key for key in ecs.meta.service_model.shape_for("RegisterTaskDefinitionRequest").members
]
task_definition = dict(
(key, current_task_definition_dict[key])
for key in expected_keys
if key in current_task_definition_dict.keys()
)
# The current process might not be running in a container that has the
# job's code installed. Inherit most of the process's container
# definition (things like environment, dependencies, etc.) but replace
# the image with the job origin's image and give it a new name.
# Also remove entryPoint. We plan to set containerOverrides. If both
# entryPoint and containerOverrides are specified, they're concatenated
# and the command will fail
# https://aws.amazon.com/blogs/opensource/demystifying-entrypoint-cmd-docker/
new_container_definition = {
**current_container_definition,
"name": container_name,
"image": image,
"entryPoint": [],
"command": command if command else [],
**(
{"repositoryCredentials": {"credentialsParameter": repository_credentials}}
if repository_credentials
else {}
),
**({"secrets": secrets} if secrets else {}),
**({} if include_sidecars else {"dependsOn": []}),
}
if environment:
new_container_definition["environment"] = [
*new_container_definition["environment"],
*environment,
]
if mount_points:
new_container_definition["mountPoints"] = (
new_container_definition.get("mountPoints", []) + mount_points
)
if include_sidecars:
# Start with all the sidecars
container_definitions = [
container_definition
for container_definition in current_task_definition_dict.get("containerDefinitions", [])
if container_definition["name"] != current_container_name
]
# add the adjusted container based on the current container
container_definitions.append(new_container_definition)
else:
container_definitions = [new_container_definition]
if additional_sidecars:
container_definitions = [*container_definitions, *additional_sidecars]
task_definition = {
**task_definition,
"family": family,
"containerDefinitions": container_definitions,
**({"taskRoleArn": task_role_arn} if task_role_arn else {}),
**({"executionRoleArn": execution_role_arn} if execution_role_arn else {}),
**({"runtimePlatform": runtime_platform} if runtime_platform else {}),
**({"cpu": cpu} if cpu else {}),
**({"memory": memory} if memory else {}),
**({"ephemeralStorage": {"sizeInGiB": ephemeral_storage}} if ephemeral_storage else {}),
}
if volumes:
task_definition["volumes"] = task_definition.get("volumes", []) + volumes
return task_definition
|
EcsNoTasksFound
|
python
|
kamyu104__LeetCode-Solutions
|
Python/reverse-subarray-to-maximize-array-value.py
|
{
"start": 29,
"end": 677
}
|
class ____(object):
def maxValueAfterReverse(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result, add, max_pair, min_pair = 0, 0, float("-inf"), float("inf")
for i in xrange(1, len(nums)):
result += abs(nums[i-1]-nums[i])
add = max(add,
abs(nums[0]-nums[i]) - abs(nums[i-1]-nums[i]),
abs(nums[-1]-nums[i-1]) - abs(nums[i-1]-nums[i]))
min_pair = min(min_pair, max(nums[i-1], nums[i]))
max_pair = max(max_pair, min(nums[i-1], nums[i]))
return result + max(add, (max_pair-min_pair)*2)
|
Solution
|
python
|
pyinstaller__pyinstaller
|
bootloader/waflib/Tools/qt5.py
|
{
"start": 7624,
"end": 7828
}
|
class ____(Task.Task):
color = 'BLUE'
run_str = '${QT_MOC} ${MOC_FLAGS} ${MOCCPPPATH_ST:INCPATHS} ${MOCDEFINES_ST:DEFINES} ${SRC} ${MOC_ST} ${TGT}'
def quote_flag(self, x):
return x
|
moc
|
python
|
scikit-learn__scikit-learn
|
examples/bicluster/plot_bicluster_newsgroups.py
|
{
"start": 1890,
"end": 5351
}
|
class ____(TfidfVectorizer):
def build_tokenizer(self):
tokenize = super().build_tokenizer()
return lambda doc: list(number_normalizer(tokenize(doc)))
# exclude 'comp.os.ms-windows.misc'
categories = [
"alt.atheism",
"comp.graphics",
"comp.sys.ibm.pc.hardware",
"comp.sys.mac.hardware",
"comp.windows.x",
"misc.forsale",
"rec.autos",
"rec.motorcycles",
"rec.sport.baseball",
"rec.sport.hockey",
"sci.crypt",
"sci.electronics",
"sci.med",
"sci.space",
"soc.religion.christian",
"talk.politics.guns",
"talk.politics.mideast",
"talk.politics.misc",
"talk.religion.misc",
]
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = NumberNormalizingVectorizer(stop_words="english", min_df=5)
cocluster = SpectralCoclustering(
n_clusters=len(categories), svd_method="arpack", random_state=0
)
kmeans = MiniBatchKMeans(
n_clusters=len(categories), batch_size=20000, random_state=0, n_init=3
)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print(
f"Done in {time() - start_time:.2f}s. V-measure: \
{v_measure_score(y_cocluster, y_true):.4f}"
)
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print(
f"Done in {time() - start_time:.2f}s. V-measure: \
{v_measure_score(y_kmeans, y_true):.4f}"
)
feature_names = vectorizer.get_feature_names_out()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
# Note: the following is identical to X[rows[:, np.newaxis],
# cols].sum() but much faster in scipy <= 0.16
weight = X[rows][:, cols].sum()
cut = X[row_complement][:, cols].sum() + X[rows][:, col_complement].sum()
return cut / weight
bicluster_ncuts = list(bicluster_ncut(i) for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = Counter(document_names[doc] for doc in cluster_docs)
cat_string = ", ".join(
f"{(c / n_rows * 100):.0f}% {name}" for name, c in counter.most_common(3)
)
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = out_of_cluster_docs.nonzero()[0]
word_col = X[:, cluster_words]
word_scores = np.array(
word_col[cluster_docs, :].sum(axis=0)
- word_col[out_of_cluster_docs, :].sum(axis=0)
)
word_scores = word_scores.ravel()
important_words = list(
feature_names[cluster_words[i]] for i in word_scores.argsort()[:-11:-1]
)
print(f"bicluster {idx} : {n_rows} documents, {n_cols} words")
print(f"categories : {cat_string}")
print(f"words : {', '.join(important_words)}\n")
|
NumberNormalizingVectorizer
|
python
|
pytorch__pytorch
|
torch/_dynamo/convert_frame.py
|
{
"start": 76710,
"end": 80557
}
|
class ____:
def __init__(self, callback: ConvertFrameProtocol, hooks: Hooks) -> None:
functools.wraps(callback)(self)
self._torchdynamo_orig_backend = callback
self.hooks = hooks
def __call__(
self,
frame: DynamoFrameType,
cache_entry: Optional[CacheEntry],
frame_state: dict[str, Union[int, FrameStateSizeEntry]],
) -> ConvertFrameReturn:
assert frame_state is not None
input_codes.add(frame.f_code)
is_skipfile = trace_rules.check(frame.f_code)
if sys.version_info >= (3, 13):
has_started_execution = frame.f_lasti > first_real_inst_idx(frame.f_code)
else:
has_started_execution = frame.f_lasti >= first_real_inst_idx(frame.f_code)
if (
# TODO: the first condition is not covered by any test
has_started_execution
or is_skipfile
or config.disable
or (
should_skip_due_to_torch_dispatch_mode()
and not getattr(self._torchdynamo_orig_backend, "_export", False)
)
):
if log.isEnabledFor(logging.DEBUG):
if has_started_execution:
skip_reason = "traced frame already"
elif trace_rules.check(frame.f_code):
skip_reason = "in skipfiles"
elif is_in_torch_dispatch_mode(include_infra_modes=False):
skip_reason = "non-infra torch dispatch mode present, this is not supported today in torch.compile"
else:
skip_reason = "dynamo tracing is disabled"
log.debug(
"skipping: %s (reason: %s, file: %s)",
frame.f_code.co_name,
skip_reason,
frame.f_code.co_filename,
)
return ConvertFrameReturn()
if (
frame.f_code.co_filename == "<string>" and frame.f_code.co_name == "__new__"
) or (
frame.f_code.co_filename.endswith("collections/__init__.py")
and frame.f_code.co_name == "_make"
):
# nametuple constructor/_make
return ConvertFrameReturn()
if torch._dynamo.utils.get_optimize_ddp_mode() == "ddp_optimizer":
ddp_module = DistributedDataParallel._get_active_ddp_module()
if ddp_module:
with compile_lock:
from torch._dynamo.backends.distributed import DDPOptimizer
ddp_optimizer = DDPOptimizer(
bucket_bytes_cap=ddp_module.bucket_bytes_cap,
backend_compile_fn=self._torchdynamo_orig_backend._torchdynamo_orig_backend, # type: ignore[attr-defined]
)
assert hasattr(
self._torchdynamo_orig_backend, "_clone_with_backend"
), (
"DDPOptimizer only supports callback fns that know how to clone themselves."
)
hijacked_callback = (
self._torchdynamo_orig_backend._clone_with_backend(
ddp_optimizer.compile_fn,
)
)
return hijacked_callback(
frame, cache_entry, self.hooks, frame_state
)
with compile_lock, _disable_current_modes():
# skip=1: skip this frame
result = self._torchdynamo_orig_backend(
frame, cache_entry, self.hooks, frame_state, skip=1
)
return result
def catch_errors_wrapper(
callback: ConvertFrameProtocol, hooks: Hooks
) -> CatchErrorsWrapper:
return CatchErrorsWrapper(callback, hooks)
|
CatchErrorsWrapper
|
python
|
django__django
|
tests/middleware/test_security.py
|
{
"start": 142,
"end": 12079
}
|
class ____(SimpleTestCase):
def middleware(self, *args, **kwargs):
from django.middleware.security import SecurityMiddleware
return SecurityMiddleware(self.response(*args, **kwargs))
@property
def secure_request_kwargs(self):
return {"wsgi.url_scheme": "https"}
def response(self, *args, headers=None, **kwargs):
def get_response(req):
response = HttpResponse(*args, **kwargs)
if headers:
for k, v in headers.items():
response.headers[k] = v
return response
return get_response
def process_response(self, *args, secure=False, request=None, **kwargs):
request_kwargs = {}
if secure:
request_kwargs.update(self.secure_request_kwargs)
if request is None:
request = self.request.get("/some/url", **request_kwargs)
ret = self.middleware(*args, **kwargs).process_request(request)
if ret:
return ret
return self.middleware(*args, **kwargs)(request)
request = RequestFactory()
def process_request(self, method, *args, secure=False, **kwargs):
if secure:
kwargs.update(self.secure_request_kwargs)
req = getattr(self.request, method.lower())(*args, **kwargs)
return self.middleware().process_request(req)
@override_settings(SECURE_HSTS_SECONDS=3600)
def test_sts_on(self):
"""
With SECURE_HSTS_SECONDS=3600, the middleware adds
"Strict-Transport-Security: max-age=3600" to the response.
"""
self.assertEqual(
self.process_response(secure=True).headers["Strict-Transport-Security"],
"max-age=3600",
)
@override_settings(SECURE_HSTS_SECONDS=3600)
def test_sts_already_present(self):
"""
The middleware will not override a "Strict-Transport-Security" header
already present in the response.
"""
response = self.process_response(
secure=True, headers={"Strict-Transport-Security": "max-age=7200"}
)
self.assertEqual(response.headers["Strict-Transport-Security"], "max-age=7200")
@override_settings(SECURE_HSTS_SECONDS=3600)
def test_sts_only_if_secure(self):
"""
The "Strict-Transport-Security" header is not added to responses going
over an insecure connection.
"""
self.assertNotIn(
"Strict-Transport-Security",
self.process_response(secure=False).headers,
)
@override_settings(SECURE_HSTS_SECONDS=0)
def test_sts_off(self):
"""
With SECURE_HSTS_SECONDS=0, the middleware does not add a
"Strict-Transport-Security" header to the response.
"""
self.assertNotIn(
"Strict-Transport-Security",
self.process_response(secure=True).headers,
)
@override_settings(SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=True)
def test_sts_include_subdomains(self):
"""
With SECURE_HSTS_SECONDS non-zero and SECURE_HSTS_INCLUDE_SUBDOMAINS
True, the middleware adds a "Strict-Transport-Security" header with the
"includeSubDomains" directive to the response.
"""
response = self.process_response(secure=True)
self.assertEqual(
response.headers["Strict-Transport-Security"],
"max-age=600; includeSubDomains",
)
@override_settings(SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=False)
def test_sts_no_include_subdomains(self):
"""
With SECURE_HSTS_SECONDS non-zero and SECURE_HSTS_INCLUDE_SUBDOMAINS
False, the middleware adds a "Strict-Transport-Security" header without
the "includeSubDomains" directive to the response.
"""
response = self.process_response(secure=True)
self.assertEqual(response.headers["Strict-Transport-Security"], "max-age=600")
@override_settings(SECURE_HSTS_SECONDS=10886400, SECURE_HSTS_PRELOAD=True)
def test_sts_preload(self):
"""
With SECURE_HSTS_SECONDS non-zero and SECURE_HSTS_PRELOAD True, the
middleware adds a "Strict-Transport-Security" header with the "preload"
directive to the response.
"""
response = self.process_response(secure=True)
self.assertEqual(
response.headers["Strict-Transport-Security"],
"max-age=10886400; preload",
)
@override_settings(
SECURE_HSTS_SECONDS=10886400,
SECURE_HSTS_INCLUDE_SUBDOMAINS=True,
SECURE_HSTS_PRELOAD=True,
)
def test_sts_subdomains_and_preload(self):
"""
With SECURE_HSTS_SECONDS non-zero, SECURE_HSTS_INCLUDE_SUBDOMAINS and
SECURE_HSTS_PRELOAD True, the middleware adds a
"Strict-Transport-Security" header containing both the
"includeSubDomains" and "preload" directives to the response.
"""
response = self.process_response(secure=True)
self.assertEqual(
response.headers["Strict-Transport-Security"],
"max-age=10886400; includeSubDomains; preload",
)
@override_settings(SECURE_HSTS_SECONDS=10886400, SECURE_HSTS_PRELOAD=False)
def test_sts_no_preload(self):
"""
With SECURE_HSTS_SECONDS non-zero and SECURE_HSTS_PRELOAD
False, the middleware adds a "Strict-Transport-Security" header without
the "preload" directive to the response.
"""
response = self.process_response(secure=True)
self.assertEqual(
response.headers["Strict-Transport-Security"],
"max-age=10886400",
)
@override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True)
def test_content_type_on(self):
"""
With SECURE_CONTENT_TYPE_NOSNIFF set to True, the middleware adds
"X-Content-Type-Options: nosniff" header to the response.
"""
self.assertEqual(
self.process_response().headers["X-Content-Type-Options"],
"nosniff",
)
@override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True)
def test_content_type_already_present(self):
"""
The middleware will not override an "X-Content-Type-Options" header
already present in the response.
"""
response = self.process_response(
secure=True, headers={"X-Content-Type-Options": "foo"}
)
self.assertEqual(response.headers["X-Content-Type-Options"], "foo")
@override_settings(SECURE_CONTENT_TYPE_NOSNIFF=False)
def test_content_type_off(self):
"""
With SECURE_CONTENT_TYPE_NOSNIFF False, the middleware does not add an
"X-Content-Type-Options" header to the response.
"""
self.assertNotIn("X-Content-Type-Options", self.process_response().headers)
@override_settings(SECURE_SSL_REDIRECT=True)
def test_ssl_redirect_on(self):
"""
With SECURE_SSL_REDIRECT True, the middleware redirects any non-secure
requests to the https:// version of the same URL.
"""
ret = self.process_request("get", "/some/url?query=string")
self.assertEqual(ret.status_code, 301)
self.assertEqual(ret["Location"], "https://testserver/some/url?query=string")
@override_settings(SECURE_SSL_REDIRECT=True)
def test_no_redirect_ssl(self):
"""
The middleware does not redirect secure requests.
"""
ret = self.process_request("get", "/some/url", secure=True)
self.assertIsNone(ret)
@override_settings(SECURE_SSL_REDIRECT=True, SECURE_REDIRECT_EXEMPT=["^insecure/"])
def test_redirect_exempt(self):
"""
The middleware does not redirect requests with URL path matching an
exempt pattern.
"""
ret = self.process_request("get", "/insecure/page")
self.assertIsNone(ret)
@override_settings(SECURE_SSL_REDIRECT=True, SECURE_SSL_HOST="secure.example.com")
def test_redirect_ssl_host(self):
"""
The middleware redirects to SECURE_SSL_HOST if given.
"""
ret = self.process_request("get", "/some/url")
self.assertEqual(ret.status_code, 301)
self.assertEqual(ret["Location"], "https://secure.example.com/some/url")
@override_settings(SECURE_SSL_REDIRECT=False)
def test_ssl_redirect_off(self):
"""
With SECURE_SSL_REDIRECT False, the middleware does not redirect.
"""
ret = self.process_request("get", "/some/url")
self.assertIsNone(ret)
@override_settings(SECURE_REFERRER_POLICY=None)
def test_referrer_policy_off(self):
"""
With SECURE_REFERRER_POLICY set to None, the middleware does not add a
"Referrer-Policy" header to the response.
"""
self.assertNotIn("Referrer-Policy", self.process_response().headers)
def test_referrer_policy_on(self):
"""
With SECURE_REFERRER_POLICY set to a valid value, the middleware adds a
"Referrer-Policy" header to the response.
"""
tests = (
("strict-origin", "strict-origin"),
("strict-origin,origin", "strict-origin,origin"),
("strict-origin, origin", "strict-origin,origin"),
(["strict-origin", "origin"], "strict-origin,origin"),
(("strict-origin", "origin"), "strict-origin,origin"),
)
for value, expected in tests:
with (
self.subTest(value=value),
override_settings(SECURE_REFERRER_POLICY=value),
):
self.assertEqual(
self.process_response().headers["Referrer-Policy"],
expected,
)
@override_settings(SECURE_REFERRER_POLICY="strict-origin")
def test_referrer_policy_already_present(self):
"""
The middleware will not override a "Referrer-Policy" header already
present in the response.
"""
response = self.process_response(headers={"Referrer-Policy": "unsafe-url"})
self.assertEqual(response.headers["Referrer-Policy"], "unsafe-url")
@override_settings(SECURE_CROSS_ORIGIN_OPENER_POLICY=None)
def test_coop_off(self):
"""
With SECURE_CROSS_ORIGIN_OPENER_POLICY set to None, the middleware does
not add a "Cross-Origin-Opener-Policy" header to the response.
"""
self.assertNotIn("Cross-Origin-Opener-Policy", self.process_response())
def test_coop_default(self):
"""SECURE_CROSS_ORIGIN_OPENER_POLICY defaults to same-origin."""
self.assertEqual(
self.process_response().headers["Cross-Origin-Opener-Policy"],
"same-origin",
)
def test_coop_on(self):
"""
With SECURE_CROSS_ORIGIN_OPENER_POLICY set to a valid value, the
middleware adds a "Cross-Origin_Opener-Policy" header to the response.
"""
tests = ["same-origin", "same-origin-allow-popups", "unsafe-none"]
for value in tests:
with (
self.subTest(value=value),
override_settings(
SECURE_CROSS_ORIGIN_OPENER_POLICY=value,
),
):
self.assertEqual(
self.process_response().headers["Cross-Origin-Opener-Policy"],
value,
)
@override_settings(SECURE_CROSS_ORIGIN_OPENER_POLICY="unsafe-none")
def test_coop_already_present(self):
"""
The middleware doesn't override a "Cross-Origin-Opener-Policy" header
already present in the response.
"""
response = self.process_response(
headers={"Cross-Origin-Opener-Policy": "same-origin"}
)
self.assertEqual(response.headers["Cross-Origin-Opener-Policy"], "same-origin")
|
SecurityMiddlewareTest
|
python
|
eventlet__eventlet
|
eventlet/green/http/cookies.py
|
{
"start": 23727,
"end": 24189
}
|
class ____(BaseCookie):
"""
SimpleCookie supports strings as cookie values. When setting
the value using the dictionary assignment notation, SimpleCookie
calls the builtin str() to convert the value to a string. Values
received from HTTP are kept as strings.
"""
def value_decode(self, val):
return _unquote(val), val
def value_encode(self, val):
strval = str(val)
return strval, _quote(strval)
|
SimpleCookie
|
python
|
apache__airflow
|
providers/celery/tests/unit/celery/sensors/test_celery_queue.py
|
{
"start": 949,
"end": 2943
}
|
class ____:
def setup_method(self):
class TestCeleryqueueSensor(CeleryQueueSensor):
def _check_task_id(self, context):
return True
self.sensor = TestCeleryqueueSensor
@patch("celery.app.control.Inspect")
def test_poke_success(self, mock_inspect):
mock_inspect_result = mock_inspect.return_value
# test success
mock_inspect_result.reserved.return_value = {"test_queue": []}
mock_inspect_result.scheduled.return_value = {"test_queue": []}
mock_inspect_result.active.return_value = {"test_queue": []}
test_sensor = self.sensor(celery_queue="test_queue", task_id="test-task")
assert test_sensor.poke(None)
@patch("celery.app.control.Inspect")
def test_poke_fail(self, mock_inspect):
mock_inspect_result = mock_inspect.return_value
# test success
mock_inspect_result.reserved.return_value = {"test_queue": []}
mock_inspect_result.scheduled.return_value = {"test_queue": []}
mock_inspect_result.active.return_value = {"test_queue": ["task"]}
test_sensor = self.sensor(celery_queue="test_queue", task_id="test-task")
assert not test_sensor.poke(None)
@patch("celery.app.control.Inspect")
def test_poke_fail_with_exception(self, mock_inspect):
mock_inspect_result = mock_inspect.return_value
mock_inspect_result.reserved.return_value = {}
mock_inspect_result.scheduled.return_value = {}
mock_inspect_result.active.return_value = {}
test_sensor = self.sensor(celery_queue="test_queue", task_id="test-task")
with pytest.raises(KeyError):
test_sensor.poke(None)
@patch("celery.app.control.Inspect")
def test_poke_success_with_taskid(self, mock_inspect):
test_sensor = self.sensor(
celery_queue="test_queue", task_id="test-task", target_task_id="target-task"
)
assert test_sensor.poke(None)
|
TestCeleryQueueSensor
|
python
|
apache__airflow
|
providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/executors/kubernetes_executor_utils.py
|
{
"start": 2058,
"end": 2213
}
|
class ____(metaclass=Singleton):
"""Singleton for tracking resourceVersion from Kubernetes."""
resource_version: dict[str, str] = {}
|
ResourceVersion
|
python
|
mlflow__mlflow
|
tests/db/test_tracking_operations.py
|
{
"start": 265,
"end": 5909
}
|
class ____(mlflow.pyfunc.PythonModel):
def load_context(self, context):
pass
def predict(self, context, model_input, params=None):
pass
def start_run_and_log_data():
with mlflow.start_run():
mlflow.log_param("p", "param")
mlflow.log_metric("m", 1.0)
mlflow.set_tag("t", "tag")
mlflow.pyfunc.log_model(name="model", python_model=Model(), registered_model_name="model")
def test_search_runs():
start_run_and_log_data()
runs = mlflow.search_runs(experiment_ids=["0"], order_by=["param.start_time DESC"])
mlflow.get_run(runs["run_id"][0])
def test_set_run_status_to_killed():
"""
This test ensures the following migration scripts work correctly:
- cfd24bdc0731_update_run_status_constraint_with_killed.py
- 0a8213491aaa_drop_duplicate_killed_constraint.py
"""
with mlflow.start_run() as run:
pass
client = MlflowClient()
client.set_terminated(run_id=run.info.run_id, status="KILLED")
def test_database_operational_error(monkeypatch):
# This test is specifically designed to force errors with SQLite. Skip it if
# using a non-SQLite backend.
if not MLFLOW_TRACKING_URI.get().startswith("sqlite"):
pytest.skip("Only works on SQLite")
# This test patches parts of SQLAlchemy and sqlite3.dbapi to simulate a
# SQLAlchemy OperationalError. PEP 249 describes OperationalError as:
#
# > Exception raised for errors that are related to the database's operation
# > and not necessarily under the control of the programmer, e.g. an
# > unexpected disconnect occurs, the data source name is not found, a
# > transaction could not be processed, a memory allocation error occurred
# > during processing, etc.
#
# These errors are typically transient and can be resolved by retrying the
# operation, hence MLflow has different handling for them as compared to
# the more generic exception type, SQLAlchemyError.
#
# This is particularly important for REST clients, where
# TEMPORARILY_UNAVAILABLE triggers MLflow REST clients to retry the request,
# whereas BAD_REQUEST does not.
api_module = None
old_connect = None
# Depending on the version of SQLAlchemy, the function we need to patch is
# either called "dbapi" (sqlalchemy<2.0) or "import_dbapi"
# (sqlalchemy>=2.0).
for dialect_attr in ["dbapi", "import_dbapi"]:
if hasattr(sqlalchemy.dialects.sqlite.pysqlite.SQLiteDialect_pysqlite, dialect_attr):
break
else:
raise AssertionError("Could not find dbapi attribute on SQLiteDialect_pysqlite")
old_dbapi = getattr(sqlalchemy.dialects.sqlite.pysqlite.SQLiteDialect_pysqlite, dialect_attr)
class ConnectionWrapper:
"""Wraps a sqlite3.Connection object."""
def __init__(self, conn):
self.conn = conn
def __getattr__(self, name):
return getattr(self.conn, name)
def cursor(self):
"""Return a wrapped SQLite cursor."""
return CursorWrapper(self.conn.cursor())
class CursorWrapper:
"""Wraps a sqlite3.Cursor object."""
def __init__(self, cursor):
self.cursor = cursor
def __getattr__(self, name):
return getattr(self.cursor, name)
def execute(self, *args, **kwargs):
"""Wraps execute(), simulating sporadic OperationalErrors."""
if (
len(args) >= 2
and "test_database_operational_error_1667938883_param" in args[1]
and "test_database_operational_error_1667938883_value" in args[1]
):
# Simulate a database error
raise sqlite3.OperationalError("test")
return self.cursor.execute(*args, **kwargs)
def connect(*args, **kwargs):
"""Wraps sqlite3.dbapi.connect(), returning a wrapped connection."""
conn = old_connect(*args, **kwargs)
return ConnectionWrapper(conn)
def dbapi(*args, **kwargs):
"""Wraps SQLiteDialect_pysqlite.dbapi(), returning patched dbapi."""
nonlocal api_module, old_connect
if api_module is None:
# Only patch the first time dbapi() is called, to avoid recursion.
api_module = old_dbapi(*args, **kwargs)
old_connect = api_module.connect
monkeypatch.setattr(api_module, "connect", connect)
return api_module
monkeypatch.setattr(
sqlalchemy.dialects.sqlite.pysqlite.SQLiteDialect_pysqlite, dialect_attr, dbapi
)
# Create and use a unique tracking URI for this test. This avoids an issue
# where an earlier test has already created and cached a SQLAlchemy engine
# (i.e. database connections), preventing our error-throwing monkeypatches
# from being called.
monkeypatch.setenv(MLFLOW_TRACKING_URI.name, f"{MLFLOW_TRACKING_URI.get()}-{uuid.uuid4().hex}")
with mock.patch("mlflow.store.db.utils._logger.exception") as exception:
with pytest.raises(mlflow.MlflowException, match=r"sqlite3\.OperationalError"):
with mlflow.start_run():
# This statement will fail with an OperationalError.
mlflow.log_param(
"test_database_operational_error_1667938883_param",
"test_database_operational_error_1667938883_value",
)
# Verify that the error handling was executed.
assert any(
"SQLAlchemy database error" in str(call) and "sqlite3.OperationalError" in str(call)
for call in exception.mock_calls
)
|
Model
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1046867,
"end": 1047520
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of UpdateTeamsRepository"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "repository", "teams")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
repository = sgqlc.types.Field("Repository", graphql_name="repository")
"""The repository that was updated."""
teams = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null("Team")), graphql_name="teams")
"""The teams granted permission on the repository."""
|
UpdateTeamsRepositoryPayload
|
python
|
encode__django-rest-framework
|
rest_framework/viewsets.py
|
{
"start": 8357,
"end": 8632
}
|
class ____(ViewSetMixin, generics.GenericAPIView):
"""
The GenericViewSet class does not provide any actions by default,
but does include the base set of generic view behavior, such as
the `get_object` and `get_queryset` methods.
"""
pass
|
GenericViewSet
|
python
|
aio-libs__aiohttp
|
aiohttp/web_protocol.py
|
{
"start": 2854,
"end": 2991
}
|
class ____:
status: int
exc: BaseException
message: str
_MsgType = tuple[RawRequestMessage | _ErrInfo, StreamReader]
|
_ErrInfo
|
python
|
huggingface__transformers
|
tests/models/owlvit/test_image_processing_owlvit.py
|
{
"start": 3145,
"end": 5059
}
|
class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = OwlViTImageProcessor if is_vision_available() else None
fast_image_processing_class = OwlViTImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = OwlViTImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_center_crop"))
self.assertTrue(hasattr(image_processing, "center_crop"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 18, "width": 18})
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
|
OwlViTImageProcessingTest
|
python
|
modin-project__modin
|
modin/tests/pandas/test_io.py
|
{
"start": 112357,
"end": 120688
}
|
class ____:
# It's not easy to add infrastructure for `spss` format.
# In case of defaulting to pandas, it's enough
# to check that the parameters are passed to pandas.
def test_read_spss(self):
test_args = ("fake_path",)
test_kwargs = dict(
usecols=["A"], convert_categoricals=False, dtype_backend=lib.no_default
)
with mock.patch(
"pandas.read_spss", return_value=pandas.DataFrame([])
) as read_spss:
pd.read_spss(*test_args, **test_kwargs)
read_spss.assert_called_once_with(*test_args, **test_kwargs)
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
def test_json_normalize():
# example from pandas
data = [
{"id": 1, "name": {"first": "Coleen", "last": "Volk"}},
{"name": {"given": "Mark", "family": "Regner"}},
{"id": 2, "name": "Faye Raker"},
]
eval_io("json_normalize", data=data)
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
def test_from_arrow():
_, pandas_df = create_test_dfs(TEST_DATA)
modin_df = from_arrow(pa.Table.from_pandas(pandas_df))
df_equals(modin_df, pandas_df)
@pytest.mark.skipif(
condition=Engine.get() != "Ray",
reason="Distributed 'from_pandas' is only available for Ray engine",
)
@pytest.mark.parametrize("modify_config", [{AsyncReadMode: True}], indirect=True)
def test_distributed_from_pandas(modify_config):
pandas_df = pandas.DataFrame({f"col{i}": np.arange(200_000) for i in range(64)})
modin_df = pd.DataFrame(pandas_df)
df_equals(modin_df, pandas_df)
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
def test_from_spmatrix():
data = sparse.eye(3)
with pytest.warns(UserWarning, match="defaulting to pandas.*"):
modin_df = pd.DataFrame.sparse.from_spmatrix(data)
pandas_df = pandas.DataFrame.sparse.from_spmatrix(data)
df_equals(modin_df, pandas_df)
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
def test_to_dense():
data = {"col1": pandas.arrays.SparseArray([0, 1, 0])}
modin_df, pandas_df = create_test_dfs(data)
df_equals(modin_df.sparse.to_dense(), pandas_df.sparse.to_dense())
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
def test_to_dict_dataframe():
modin_df, _ = create_test_dfs(TEST_DATA)
assert modin_df.to_dict() == to_pandas(modin_df).to_dict()
@pytest.mark.parametrize(
"kwargs",
[
pytest.param({}, id="no_kwargs"),
pytest.param({"into": dict}, id="into_dict"),
pytest.param({"into": defaultdict(list)}, id="into_defaultdict"),
],
)
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
def test_to_dict_series(kwargs):
eval_general(
*[df.iloc[:, 0] for df in create_test_dfs(utils_test_data["int_data"])],
lambda df: df.to_dict(**kwargs),
)
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
def test_to_latex():
modin_df, _ = create_test_dfs(TEST_DATA)
assert modin_df.to_latex() == to_pandas(modin_df).to_latex()
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
@pytest.mark.skipif(
platform.system() == "Windows",
reason="https://github.com/modin-project/modin/issues/7497",
)
def test_to_xml():
# `lxml` is a required dependency for `to_xml`, but optional for Modin.
# For some engines we do not install it.
pytest.importorskip("lxml")
modin_df, _ = create_test_dfs(TEST_DATA)
assert modin_df.to_xml() == to_pandas(modin_df).to_xml()
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
def test_to_period():
index = pandas.DatetimeIndex(
pandas.date_range("2000", freq="h", periods=len(TEST_DATA["col1"]))
)
modin_df, pandas_df = create_test_dfs(TEST_DATA, index=index)
df_equals(modin_df.to_period(), pandas_df.to_period())
@pytest.mark.xfail(
Engine.get() == "Ray" and version.parse(ray.__version__) <= version.parse("2.9.3"),
reason="Ray-2.9.3 has a problem using pandas 2.2.0. It will be resolved in the next release of Ray.",
)
@pytest.mark.skipif(
condition=Engine.get() != "Ray",
reason="Modin Dataframe can only be converted to a Ray Dataset if Modin uses a Ray engine.",
)
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
def test_df_to_ray():
index = pandas.DatetimeIndex(
pandas.date_range("2000", freq="h", periods=len(TEST_DATA["col1"]))
)
modin_df, pandas_df = create_test_dfs(TEST_DATA, index=index)
ray_dataset = modin_df.modin.to_ray()
df_equals(ray_dataset.to_pandas(), pandas_df)
@pytest.mark.xfail(
Engine.get() == "Ray" and version.parse(ray.__version__) <= version.parse("2.9.3"),
reason="Ray-2.9.3 has a problem using pandas 2.2.0. It will be resolved in the next release of Ray.",
)
@pytest.mark.skipif(
condition=Engine.get() != "Ray",
reason="Modin Dataframe can only be converted to a Ray Dataset if Modin uses a Ray engine.",
)
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
def test_series_to_ray():
index = pandas.DatetimeIndex(
pandas.date_range("2000", freq="h", periods=len(TEST_DATA["col1"]))
)
# A Pandas DataFrame with column names of non-str types is not supported by Ray Dataset.
index = [str(x) for x in index]
pandas_df = pandas.DataFrame(TEST_DATA, index=index)
pandas_s = pandas_df.iloc[0]
modin_s = pd.Series(pandas_s)
ray_dataset = modin_s.modin.to_ray()
df_equals(ray_dataset.to_pandas().squeeze(), pandas_s)
@pytest.mark.xfail(
Engine.get() == "Ray" and version.parse(ray.__version__) <= version.parse("2.9.3"),
reason="Ray-2.9.3 has a problem using pandas 2.2.0. It will be resolved in the next release of Ray.",
)
@pytest.mark.skipif(
condition=Engine.get() != "Ray",
reason="Ray Dataset can only be converted to a Modin Dataframe if Modin uses a Ray engine.",
)
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
def test_from_ray():
index = pandas.DatetimeIndex(
pandas.date_range("2000", freq="h", periods=len(TEST_DATA["col1"]))
)
modin_df, pandas_df = create_test_dfs(TEST_DATA, index=index)
ray_df = ray.data.from_pandas(pandas_df)
result_df = from_ray(ray_df)
df_equals(result_df, modin_df)
@pytest.mark.skipif(
condition=Engine.get() != "Dask",
reason="Modin DataFrame can only be converted to a Dask DataFrame if Modin uses a Dask engine.",
)
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
def test_df_to_dask():
index = pandas.DatetimeIndex(
pandas.date_range("2000", freq="h", periods=len(TEST_DATA["col1"]))
)
modin_df, pandas_df = create_test_dfs(TEST_DATA, index=index)
dask_df = modin_df.modin.to_dask()
df_equals(dask_df.compute(), pandas_df)
@pytest.mark.skipif(
condition=Engine.get() != "Dask",
reason="Modin DataFrame can only be converted to a Dask DataFrame if Modin uses a Dask engine.",
)
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
def test_series_to_dask():
modin_s, pandas_s = create_test_series(TEST_DATA["col1"])
dask_series = modin_s.modin.to_dask()
df_equals(dask_series.compute(), pandas_s)
@pytest.mark.skipif(
condition=Engine.get() != "Dask",
reason="Dask DataFrame can only be converted to a Modin DataFrame if Modin uses a Dask engine.",
)
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
def test_from_dask():
import dask.dataframe as dd
index = pandas.DatetimeIndex(
pandas.date_range("2000", freq="h", periods=len(TEST_DATA["col1"]))
)
modin_df, pandas_df = create_test_dfs(TEST_DATA, index=index)
dask_df = dd.from_pandas(pandas_df, npartitions=NPartitions.get())
result_df = from_dask(dask_df)
df_equals(result_df, modin_df)
@pytest.mark.skipif(
condition=Engine.get() not in ("Ray", "Dask", "Unidist"),
reason="Modin DataFrame can only be created from map if Modin uses Ray, Dask or MPI engine.",
)
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
def test_from_map():
factor = 3
data = [1] * factor + [2] * factor + [3] * factor
expected_df = pd.DataFrame(data, index=[0, 1, 2] * factor)
def map_func(x, factor):
return [x] * factor
result_df = from_map(map_func, [1, 2, 3], 3)
df_equals(result_df, expected_df)
|
TestSpss
|
python
|
django__django
|
django/contrib/postgres/indexes.py
|
{
"start": 2890,
"end": 4030
}
|
class ____(PostgresIndex):
suffix = "brin"
def __init__(
self, *expressions, autosummarize=None, pages_per_range=None, **kwargs
):
if pages_per_range is not None and pages_per_range <= 0:
raise ValueError("pages_per_range must be None or a positive integer")
self.autosummarize = autosummarize
self.pages_per_range = pages_per_range
super().__init__(*expressions, **kwargs)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
if self.autosummarize is not None:
kwargs["autosummarize"] = self.autosummarize
if self.pages_per_range is not None:
kwargs["pages_per_range"] = self.pages_per_range
return path, args, kwargs
def get_with_params(self):
with_params = []
if self.autosummarize is not None:
with_params.append(
"autosummarize = %s" % ("on" if self.autosummarize else "off")
)
if self.pages_per_range is not None:
with_params.append("pages_per_range = %d" % self.pages_per_range)
return with_params
|
BrinIndex
|
python
|
jschneier__django-storages
|
storages/backends/gcloud.py
|
{
"start": 1312,
"end": 3572
}
|
class ____(CompressedFileMixin, File):
def __init__(self, name, mode, storage):
self.name = name
self.mime_type, self.mime_encoding = mimetypes.guess_type(name)
self._mode = mode
self._storage = storage
self.blob = storage.bucket.get_blob(name, chunk_size=storage.blob_chunk_size)
if not self.blob and "w" in mode:
self.blob = Blob(
self.name, storage.bucket, chunk_size=storage.blob_chunk_size
)
self._file = None
self._is_dirty = False
@property
def size(self):
return self.blob.size
def _get_file(self):
if self._file is None:
self._file = SpooledTemporaryFile(
max_size=self._storage.max_memory_size,
suffix=".GSStorageFile",
dir=setting("FILE_UPLOAD_TEMP_DIR"),
)
if "r" in self._mode:
self._is_dirty = False
# This automatically decompresses the file
self.blob.download_to_file(self._file, checksum="crc32c")
self._file.seek(0)
return self._file
def _set_file(self, value):
self._file = value
file = property(_get_file, _set_file)
def read(self, num_bytes=None):
if "r" not in self._mode:
raise AttributeError("File was not opened in read mode.")
if num_bytes is None:
num_bytes = -1
return super().read(num_bytes)
def write(self, content):
if "w" not in self._mode:
raise AttributeError("File was not opened in write mode.")
self._is_dirty = True
return super().write(to_bytes(content))
def close(self):
if self._file is not None:
if self._is_dirty:
blob_params = self._storage.get_object_parameters(self.name)
self.blob.upload_from_file(
self.file,
rewind=True,
content_type=self.mime_type,
retry=DEFAULT_RETRY,
predefined_acl=blob_params.get("acl", self._storage.default_acl),
)
self._file.close()
self._file = None
@deconstructible
|
GoogleCloudFile
|
python
|
kamyu104__LeetCode-Solutions
|
Python/longest-substring-of-one-repeating-character.py
|
{
"start": 2456,
"end": 3755
}
|
class ____(object):
def __init__(self, N,
build_fn=lambda _: float("inf"),
query_fn=lambda x, y: y if x is None else x if y is None else min(x, y),
update_fn=lambda x: x):
self.tree = [None]*(2*2**((N-1).bit_length()))
self.base = len(self.tree)//2
self.query_fn = query_fn
self.update_fn = update_fn
for i in xrange(self.base, self.base+N):
self.tree[i] = build_fn(i-self.base)
for i in reversed(xrange(1, self.base)):
self.tree[i] = query_fn(self.tree[2*i], self.tree[2*i+1])
def update(self, i, h):
x = self.base+i
self.tree[x] = self.update_fn(h)
while x > 1:
x //= 2
self.tree[x] = self.query_fn(self.tree[x*2], self.tree[x*2+1])
def query(self, L, R):
if L > R:
return None
L += self.base
R += self.base
left = right = None
while L <= R:
if L & 1:
left = self.query_fn(left, self.tree[L])
L += 1
if R & 1 == 0:
right = self.query_fn(self.tree[R], right)
R -= 1
L //= 2
R //= 2
return self.query_fn(left, right)
# segment tree
|
SegmentTree2
|
python
|
coleifer__peewee
|
playhouse/dataset.py
|
{
"start": 14285,
"end": 14488
}
|
class ____(CSVImporter):
def load(self, file_obj, header=True, **kwargs):
kwargs.setdefault('delimiter', '\t')
return super(TSVImporter, self).load(file_obj, header, **kwargs)
|
TSVImporter
|
python
|
getsentry__sentry
|
src/sentry/models/teamreplica.py
|
{
"start": 373,
"end": 1235
}
|
class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
team_id = HybridCloudForeignKey("sentry.Team", on_delete="CASCADE")
organization_id = HybridCloudForeignKey("sentry.Organization", on_delete="CASCADE")
slug = models.SlugField()
name = models.CharField(max_length=64)
status = BoundedPositiveIntegerField()
date_added = models.DateTimeField(default=timezone.now)
org_role = models.CharField(max_length=32, null=True)
class Meta:
app_label = "sentry"
db_table = "sentry_teamreplica"
unique_together = (("organization_id", "slug"),)
__repr__ = sane_repr("name", "slug")
def get_audit_log_data(self) -> dict[str, Any]:
return {
"id": self.id,
"slug": self.slug,
"name": self.name,
"status": self.status,
}
|
TeamReplica
|
python
|
sympy__sympy
|
sympy/polys/agca/homomorphisms.py
|
{
"start": 17749,
"end": 21937
}
|
class ____(MatrixHomomorphism):
"""
Concrete class for homomorphism with domain a submodule of a free module
or a quotient thereof.
Do not instantiate; the constructor does not check that your data is well
defined. Use the ``homomorphism`` function instead:
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> M = QQ.old_poly_ring(x).free_module(2)*x
>>> homomorphism(M, M, [[1, 0], [0, 1]])
Matrix([
[1, 0], : <[x, 0], [0, x]> -> <[x, 0], [0, x]>
[0, 1]])
"""
def _apply(self, elem):
if isinstance(self.domain, SubQuotientModule):
elem = elem.data
return sum(x * e for x, e in zip(elem, self.matrix))
def _image(self):
return self.codomain.submodule(*[self(x) for x in self.domain.gens])
def _kernel(self):
syz = self.image().syzygy_module()
return self.domain.submodule(
*[sum(xi*gi for xi, gi in zip(s, self.domain.gens))
for s in syz.gens])
def homomorphism(domain, codomain, matrix):
r"""
Create a homomorphism object.
This function tries to build a homomorphism from ``domain`` to ``codomain``
via the matrix ``matrix``.
Examples
========
>>> from sympy import QQ
>>> from sympy.abc import x
>>> from sympy.polys.agca import homomorphism
>>> R = QQ.old_poly_ring(x)
>>> T = R.free_module(2)
If ``domain`` is a free module generated by `e_1, \ldots, e_n`, then
``matrix`` should be an n-element iterable `(b_1, \ldots, b_n)` where
the `b_i` are elements of ``codomain``. The constructed homomorphism is the
unique homomorphism sending `e_i` to `b_i`.
>>> F = R.free_module(2)
>>> h = homomorphism(F, T, [[1, x], [x**2, 0]])
>>> h
Matrix([
[1, x**2], : QQ[x]**2 -> QQ[x]**2
[x, 0]])
>>> h([1, 0])
[1, x]
>>> h([0, 1])
[x**2, 0]
>>> h([1, 1])
[x**2 + 1, x]
If ``domain`` is a submodule of a free module, them ``matrix`` determines
a homomoprhism from the containing free module to ``codomain``, and the
homomorphism returned is obtained by restriction to ``domain``.
>>> S = F.submodule([1, 0], [0, x])
>>> homomorphism(S, T, [[1, x], [x**2, 0]])
Matrix([
[1, x**2], : <[1, 0], [0, x]> -> QQ[x]**2
[x, 0]])
If ``domain`` is a (sub)quotient `N/K`, then ``matrix`` determines a
homomorphism from `N` to ``codomain``. If the kernel contains `K`, this
homomorphism descends to ``domain`` and is returned; otherwise an exception
is raised.
>>> homomorphism(S/[(1, 0)], T, [0, [x**2, 0]])
Matrix([
[0, x**2], : <[1, 0] + <[1, 0]>, [0, x] + <[1, 0]>, [1, 0] + <[1, 0]>> -> QQ[x]**2
[0, 0]])
>>> homomorphism(S/[(0, x)], T, [0, [x**2, 0]])
Traceback (most recent call last):
...
ValueError: kernel <[1, 0], [0, 0]> must contain sm, got <[0,x]>
"""
def freepres(module):
"""
Return a tuple ``(F, S, Q, c)`` where ``F`` is a free module, ``S`` is a
submodule of ``F``, and ``Q`` a submodule of ``S``, such that
``module = S/Q``, and ``c`` is a conversion function.
"""
if isinstance(module, FreeModule):
return module, module, module.submodule(), lambda x: module.convert(x)
if isinstance(module, QuotientModule):
return (module.base, module.base, module.killed_module,
lambda x: module.convert(x).data)
if isinstance(module, SubQuotientModule):
return (module.base.container, module.base, module.killed_module,
lambda x: module.container.convert(x).data)
# an ordinary submodule
return (module.container, module, module.submodule(),
lambda x: module.container.convert(x))
SF, SS, SQ, _ = freepres(domain)
TF, TS, TQ, c = freepres(codomain)
# NOTE this is probably a bit inefficient (redundant checks)
return FreeModuleHomomorphism(SF, TF, [c(x) for x in matrix]
).restrict_domain(SS).restrict_codomain(TS
).quotient_codomain(TQ).quotient_domain(SQ)
|
SubModuleHomomorphism
|
python
|
tensorflow__tensorflow
|
tensorflow/python/keras/saving/saved_model/layer_serialization.py
|
{
"start": 6932,
"end": 7304
}
|
class ____(LayerSavedModelSaver):
"""Index lookup layer serialization."""
@property
def python_properties(self):
# TODO(kathywu): Add python property validator
metadata = self._python_properties_internal()
if metadata['config'].get('has_static_table', False):
metadata['config']['vocabulary'] = None
return metadata
|
IndexLookupLayerSavedModelSaver
|
python
|
sphinx-doc__sphinx
|
doc/usage/extensions/example_google.py
|
{
"start": 5387,
"end": 9165
}
|
class ____:
"""The summary line for a class docstring should fit on one line.
If the class has public attributes, they may be documented here
in an ``Attributes`` section and follow the same formatting as a
function's ``Args`` section. Alternatively, attributes may be documented
inline with the attribute's declaration (see __init__ method below).
Properties created with the ``@property`` decorator should be documented
in the property's getter method.
Attributes:
attr1 (str): Description of `attr1`.
attr2 (:obj:`int`, optional): Description of `attr2`.
"""
def __init__(self, param1, param2, param3):
"""Example of docstring on the __init__ method.
The __init__ method may be documented in either the class level
docstring, or as a docstring on the __init__ method itself.
Either form is acceptable, but the two should not be mixed. Choose one
convention to document the __init__ method and be consistent with it.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1 (str): Description of `param1`.
param2 (:obj:`int`, optional): Description of `param2`. Multiple
lines are supported.
param3 (list(str)): Description of `param3`.
"""
self.attr1 = param1
self.attr2 = param2
self.attr3 = param3 #: Doc comment *inline* with attribute
#: list(str): Doc comment *before* attribute, with type specified
self.attr4 = ['attr4']
self.attr5 = None
"""str: Docstring *after* attribute, with type specified."""
@property
def readonly_property(self):
"""str: Properties should be documented in their getter method."""
return 'readonly_property'
@property
def readwrite_property(self):
"""list(str): Properties with both a getter and setter
should only be documented in their getter method.
If the setter method contains notable behavior, it should be
mentioned here.
"""
return ['readwrite_property']
@readwrite_property.setter
def readwrite_property(self, value):
_ = value
def example_method(self, param1, param2):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
return True
def __special__(self):
"""By default special members with docstrings are not included.
Special members are any methods or attributes that start with and
end with a double underscore. Any special member with a docstring
will be included in the output, if
``napoleon_include_special_with_doc`` is set to True.
This behavior can be enabled by changing the following setting in
Sphinx's conf.py::
napoleon_include_special_with_doc = True
"""
pass
def __special_without_docstring__(self):
pass
def _private(self):
"""By default private members are not included.
Private members are any methods or attributes that start with an
underscore and are *not* special. By default they are not included
in the output.
This behavior can be changed such that private members *are* included
by changing the following setting in Sphinx's conf.py::
napoleon_include_private_with_doc = True
"""
pass
def _private_without_docstring(self):
pass
|
ExampleClass
|
python
|
fastapi__sqlmodel
|
sqlmodel/orm/session.py
|
{
"start": 840,
"end": 5763
}
|
class ____(_Session):
@overload
def exec(
self,
statement: Select[_TSelectParam],
*,
params: Optional[Union[Mapping[str, Any], Sequence[Mapping[str, Any]]]] = None,
execution_options: Mapping[str, Any] = util.EMPTY_DICT,
bind_arguments: Optional[Dict[str, Any]] = None,
_parent_execute_state: Optional[Any] = None,
_add_event: Optional[Any] = None,
) -> TupleResult[_TSelectParam]: ...
@overload
def exec(
self,
statement: SelectOfScalar[_TSelectParam],
*,
params: Optional[Union[Mapping[str, Any], Sequence[Mapping[str, Any]]]] = None,
execution_options: Mapping[str, Any] = util.EMPTY_DICT,
bind_arguments: Optional[Dict[str, Any]] = None,
_parent_execute_state: Optional[Any] = None,
_add_event: Optional[Any] = None,
) -> ScalarResult[_TSelectParam]: ...
@overload
def exec(
self,
statement: UpdateBase,
*,
params: Optional[Union[Mapping[str, Any], Sequence[Mapping[str, Any]]]] = None,
execution_options: Mapping[str, Any] = util.EMPTY_DICT,
bind_arguments: Optional[Dict[str, Any]] = None,
_parent_execute_state: Optional[Any] = None,
_add_event: Optional[Any] = None,
) -> CursorResult[Any]: ...
def exec(
self,
statement: Union[
Select[_TSelectParam],
SelectOfScalar[_TSelectParam],
Executable[_TSelectParam],
UpdateBase,
],
*,
params: Optional[Union[Mapping[str, Any], Sequence[Mapping[str, Any]]]] = None,
execution_options: Mapping[str, Any] = util.EMPTY_DICT,
bind_arguments: Optional[Dict[str, Any]] = None,
_parent_execute_state: Optional[Any] = None,
_add_event: Optional[Any] = None,
) -> Union[
TupleResult[_TSelectParam], ScalarResult[_TSelectParam], CursorResult[Any]
]:
results = super().execute(
statement,
params=params,
execution_options=execution_options,
bind_arguments=bind_arguments,
_parent_execute_state=_parent_execute_state,
_add_event=_add_event,
)
if isinstance(statement, SelectOfScalar):
return results.scalars()
return results # type: ignore
@deprecated(
"""
🚨 You probably want to use `session.exec()` instead of `session.execute()`.
This is the original SQLAlchemy `session.execute()` method that returns objects
of type `Row`, and that you have to call `scalars()` to get the model objects.
For example:
```Python
heroes = session.execute(select(Hero)).scalars().all()
```
instead you could use `exec()`:
```Python
heroes = session.exec(select(Hero)).all()
```
""",
category=None,
)
def execute(
self,
statement: _Executable,
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[Dict[str, Any]] = None,
_parent_execute_state: Optional[Any] = None,
_add_event: Optional[Any] = None,
) -> Result[Any]:
"""
🚨 You probably want to use `session.exec()` instead of `session.execute()`.
This is the original SQLAlchemy `session.execute()` method that returns objects
of type `Row`, and that you have to call `scalars()` to get the model objects.
For example:
```Python
heroes = session.execute(select(Hero)).scalars().all()
```
instead you could use `exec()`:
```Python
heroes = session.exec(select(Hero)).all()
```
"""
return super().execute(
statement,
params=params,
execution_options=execution_options,
bind_arguments=bind_arguments,
_parent_execute_state=_parent_execute_state,
_add_event=_add_event,
)
@deprecated(
"""
🚨 You probably want to use `session.exec()` instead of `session.query()`.
`session.exec()` is SQLModel's own short version with increased type
annotations.
Or otherwise you might want to use `session.execute()` instead of
`session.query()`.
"""
)
def query( # type: ignore
self, *entities: _ColumnsClauseArgument[Any], **kwargs: Any
) -> _Query[Any]:
"""
🚨 You probably want to use `session.exec()` instead of `session.query()`.
`session.exec()` is SQLModel's own short version with increased type
annotations.
Or otherwise you might want to use `session.execute()` instead of
`session.query()`.
"""
return super().query(*entities, **kwargs)
|
Session
|
python
|
huggingface__transformers
|
tests/models/afmoe/test_modeling_afmoe.py
|
{
"start": 898,
"end": 3298
}
|
class ____(CausalLMModelTester):
if is_torch_available():
base_model_class = AfmoeModel
def __init__(
self,
parent,
batch_size=4,
seq_length=128,
is_training=True,
use_input_mask=True,
use_token_type_ids=False,
use_labels=True,
vocab_size=64,
hidden_size=32,
intermediate_size=16,
moe_intermediate_size=16,
num_hidden_layers=2,
num_dense_layers=1,
num_attention_heads=16,
num_key_value_heads=16,
head_dim=128,
hidden_act="silu",
max_position_embeddings=16384,
initializer_range=0.02,
rms_norm_eps=1e-5,
use_cache=False,
rope_theta=10000.0,
rope_parameters=None,
num_experts=4,
num_experts_per_tok=2,
num_shared_experts=2,
route_norm=True,
route_scale=1.0,
global_attn_every_n_layers=2,
sliding_window=128,
attention_dropout=0.0,
):
super().__init__(
parent=parent,
batch_size=batch_size,
seq_length=seq_length,
is_training=is_training,
use_input_mask=use_input_mask,
use_token_type_ids=use_token_type_ids,
use_labels=use_labels,
vocab_size=vocab_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
num_key_value_heads=num_key_value_heads,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
max_position_embeddings=max_position_embeddings,
initializer_range=initializer_range,
)
self.use_cache = use_cache
self.head_dim = head_dim
self.rms_norm_eps = rms_norm_eps
self.rope_theta = rope_theta
self.moe_intermediate_size = moe_intermediate_size
self.num_dense_layers = num_dense_layers
self.num_experts = num_experts
self.num_experts_per_tok = num_experts_per_tok
self.num_shared_experts = num_shared_experts
self.route_norm = route_norm
self.route_scale = route_scale
self.global_attn_every_n_layers = global_attn_every_n_layers
self.sliding_window = sliding_window
self.attention_dropout = attention_dropout
@require_torch
|
AfmoeModelTester
|
python
|
scrapy__scrapy
|
tests/test_pipelines.py
|
{
"start": 1818,
"end": 2203
}
|
class ____:
async def process_item(self, item):
d1 = Deferred()
from twisted.internet import reactor
reactor.callLater(0, d1.callback, None)
await d1
d2 = Deferred()
reactor.callLater(0, d2.callback, None)
await maybe_deferred_to_future(d2)
item["pipeline_passed"] = True
return item
|
AsyncDefNotAsyncioPipeline
|
python
|
numpy__numpy
|
numpy/_build_utils/tempita/_tempita.py
|
{
"start": 16639,
"end": 16857
}
|
class ____:
def __init__(self, name):
self.__name = name
self.get = TemplateObjectGetter(self)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.__name)
|
TemplateObject
|
python
|
openai__openai-python
|
src/openai/resources/beta/chatkit/chatkit.py
|
{
"start": 1646,
"end": 2687
}
|
class ____(AsyncAPIResource):
@cached_property
def sessions(self) -> AsyncSessions:
return AsyncSessions(self._client)
@cached_property
def threads(self) -> AsyncThreads:
return AsyncThreads(self._client)
@cached_property
def with_raw_response(self) -> AsyncChatKitWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncChatKitWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncChatKitWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncChatKitWithStreamingResponse(self)
|
AsyncChatKit
|
python
|
doocs__leetcode
|
solution/3600-3699/3678.Smallest Absent Positive Greater Than Average/Solution.py
|
{
"start": 0,
"end": 205
}
|
class ____:
def smallestAbsent(self, nums: List[int]) -> int:
s = set(nums)
ans = max(1, sum(nums) // len(nums) + 1)
while ans in s:
ans += 1
return ans
|
Solution
|
python
|
django__django
|
tests/defer_regress/models.py
|
{
"start": 1081,
"end": 1139
}
|
class ____(Item):
class Meta:
proxy = True
|
Proxy
|
python
|
pandas-dev__pandas
|
pandas/_typing.py
|
{
"start": 2790,
"end": 7616
}
|
class ____(Protocol[_T_co]):
__module__: str = "pandas.api.typing.aliases"
@overload
def __getitem__(self, index: SupportsIndex, /) -> _T_co: ...
@overload
def __getitem__(self, index: slice, /) -> Sequence[_T_co]: ...
def __contains__(self, value: object, /) -> bool: ...
def __len__(self) -> int: ...
def __iter__(self) -> Iterator[_T_co]: ...
def index(self, value: Any, start: int = ..., stop: int = ..., /) -> int: ...
def count(self, value: Any, /) -> int: ...
def __reversed__(self) -> Iterator[_T_co]: ...
ListLike: TypeAlias = AnyArrayLike | SequenceNotStr | range
# scalars
PythonScalar: TypeAlias = str | float | bool
DatetimeLikeScalar: TypeAlias = Union["Period", "Timestamp", "Timedelta"]
# aligned with pandas-stubs - typical scalars found in Series. Explicitly leaves
# out object
_IndexIterScalar: TypeAlias = Union[
str,
bytes,
date,
datetime,
timedelta,
np.datetime64,
np.timedelta64,
bool,
int,
float,
"Timestamp",
"Timedelta",
]
Scalar: TypeAlias = Union[
_IndexIterScalar, "Interval", complex, np.integer, np.floating, np.complexfloating
]
IntStrT = TypeVar("IntStrT", bound=int | str)
# timestamp and timedelta convertible types
TimestampConvertibleTypes: TypeAlias = Union[
"Timestamp", date, np.datetime64, np.int64, float, str
]
TimestampNonexistent: TypeAlias = (
Literal["shift_forward", "shift_backward", "NaT", "raise"] | timedelta
)
TimedeltaConvertibleTypes: TypeAlias = Union[
"Timedelta", timedelta, np.timedelta64, np.int64, float, str
]
Timezone: TypeAlias = str | tzinfo
ToTimestampHow: TypeAlias = Literal["s", "e", "start", "end"]
# NDFrameT is stricter and ensures that the same subclass of NDFrame always is
# used. E.g. `def func(a: NDFrameT) -> NDFrameT: ...` means that if a
# Series is passed into a function, a Series is always returned and if a DataFrame is
# passed in, a DataFrame is always returned.
NDFrameT = TypeVar("NDFrameT", bound="NDFrame")
IndexT = TypeVar("IndexT", bound="Index")
FreqIndexT = TypeVar("FreqIndexT", "DatetimeIndex", "PeriodIndex", "TimedeltaIndex")
NumpyIndexT = TypeVar("NumpyIndexT", np.ndarray, "Index")
AxisInt: TypeAlias = int
Axis: TypeAlias = AxisInt | Literal["index", "columns", "rows"]
IndexLabel: TypeAlias = Hashable | Sequence[Hashable]
Level: TypeAlias = Hashable
Shape: TypeAlias = tuple[int, ...]
Suffixes: TypeAlias = Sequence[str | None]
Ordered: TypeAlias = bool | None
JSONSerializable: TypeAlias = PythonScalar | list | dict | None
Frequency: TypeAlias = Union[str, "BaseOffset"]
Axes: TypeAlias = ListLike
RandomState: TypeAlias = (
int
| np.ndarray
| np.random.Generator
| np.random.BitGenerator
| np.random.RandomState
)
# dtypes
NpDtype: TypeAlias = str | np.dtype | type[str | complex | bool | object]
Dtype: TypeAlias = Union["ExtensionDtype", NpDtype]
AstypeArg: TypeAlias = Union["ExtensionDtype", npt.DTypeLike]
# DtypeArg specifies all allowable dtypes in a functions its dtype argument
DtypeArg: TypeAlias = Dtype | Mapping[Hashable, Dtype]
DtypeObj: TypeAlias = Union[np.dtype, "ExtensionDtype"]
# converters
ConvertersArg: TypeAlias = dict[Hashable, Callable[[Dtype], Dtype]]
# parse_dates
ParseDatesArg: TypeAlias = (
bool | list[Hashable] | list[list[Hashable]] | dict[Hashable, list[Hashable]]
)
# For functions like rename that convert one label to another
Renamer: TypeAlias = Mapping[Any, Hashable] | Callable[[Any], Hashable]
# to maintain type information across generic functions and parametrization
T = TypeVar("T")
# used in decorators to preserve the signature of the function it decorates
# see https://mypy.readthedocs.io/en/stable/generics.html#declaring-decorators
FuncType: TypeAlias = Callable[..., Any]
F = TypeVar("F", bound=FuncType)
TypeT = TypeVar("TypeT", bound=type)
# types of vectorized key functions for DataFrame::sort_values and
# DataFrame::sort_index, among others
ValueKeyFunc: TypeAlias = Callable[["Series"], Union["Series", AnyArrayLike]] | None
IndexKeyFunc: TypeAlias = Callable[["Index"], Union["Index", AnyArrayLike]] | None
# types of `func` kwarg for DataFrame.aggregate and Series.aggregate
AggFuncTypeBase: TypeAlias = Callable | str
AggFuncTypeDict: TypeAlias = MutableMapping[
Hashable, AggFuncTypeBase | list[AggFuncTypeBase]
]
AggFuncType: TypeAlias = AggFuncTypeBase | list[AggFuncTypeBase] | AggFuncTypeDict
AggObjType: TypeAlias = Union[
"Series",
"DataFrame",
"GroupBy",
"SeriesGroupBy",
"DataFrameGroupBy",
"BaseWindow",
"Resampler",
]
PythonFuncType: TypeAlias = Callable[[Any], Any]
# filenames and file-like-objects
AnyStr_co = TypeVar("AnyStr_co", str, bytes, covariant=True)
AnyStr_contra = TypeVar("AnyStr_contra", str, bytes, contravariant=True)
|
SequenceNotStr
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/tasks.py
|
{
"start": 500399,
"end": 501448
}
|
class ____(Response):
"""
Response of tasks.share endpoint.
:param changed: The number of updated tasks
:type changed: int
"""
_service = "tasks"
_action = "share"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"changed": {
"description": "The number of updated tasks",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, changed=None, **kwargs):
super(ShareResponse, self).__init__(**kwargs)
self.changed = changed
@schema_property("changed")
def changed(self):
return self._property_changed
@changed.setter
def changed(self, value):
if value is None:
self._property_changed = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "changed", six.integer_types)
self._property_changed = value
|
ShareResponse
|
python
|
kamyu104__LeetCode-Solutions
|
Python/circular-sentence.py
|
{
"start": 38,
"end": 314
}
|
class ____(object):
def isCircularSentence(self, sentence):
"""
:type sentence: str
:rtype: bool
"""
return sentence[0] == sentence[-1] and all(sentence[i-1] == sentence[i+1]for i in xrange(len(sentence)) if sentence[i] == ' ')
|
Solution
|
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/context_manager.py
|
{
"start": 266,
"end": 842
}
|
class ____:
def __init__(self):
self.value = ""
def __enter__(self):
self.value = _test_source()
return self
def __exit__(self, exc_type, exc_value, traceback):
return
def test_source_on_enter():
c = SourceOnEnter()
with c:
_test_sink(c.value) # Issue here.
def test_source_on_enter_as():
with SourceOnEnter() as c:
_test_sink(c.value) # Issue here.
def test_source_on_enter_clear():
c = SourceOnEnter()
with c:
c.value = ""
_test_sink(c.value) # No issue.
|
SourceOnEnter
|
python
|
redis__redis-py
|
tests/test_multidb/test_pipeline.py
|
{
"start": 645,
"end": 10559
}
|
class ____:
@pytest.mark.parametrize(
"mock_multi_db_config,mock_db, mock_db1, mock_db2",
[
(
{},
{"weight": 0.2, "circuit": {"state": CBState.CLOSED}},
{"weight": 0.7, "circuit": {"state": CBState.CLOSED}},
{"weight": 0.5, "circuit": {"state": CBState.CLOSED}},
),
],
indirect=True,
)
def test_executes_pipeline_against_correct_db(
self, mock_multi_db_config, mock_db, mock_db1, mock_db2, mock_hc
):
databases = create_weighted_list(mock_db, mock_db1, mock_db2)
with (
patch.object(mock_multi_db_config, "databases", return_value=databases),
patch.object(
mock_multi_db_config, "default_health_checks", return_value=[mock_hc]
),
):
pipe = mock_pipe()
pipe.execute.return_value = ["OK1", "value1"]
mock_db1.client.pipeline.return_value = pipe
mock_hc.check_health.return_value = True
client = MultiDBClient(mock_multi_db_config)
try:
assert (
mock_multi_db_config.failover_strategy.set_databases.call_count == 1
)
pipe = client.pipeline()
pipe.set("key1", "value1")
pipe.get("key1")
assert pipe.execute() == ["OK1", "value1"]
assert len(mock_hc.check_health.call_args_list) == 9
finally:
client.close()
@pytest.mark.parametrize(
"mock_multi_db_config,mock_db, mock_db1, mock_db2",
[
(
{},
{"weight": 0.2, "circuit": {"state": CBState.CLOSED}},
{"weight": 0.5, "circuit": {"state": CBState.CLOSED}},
{"weight": 0.7, "circuit": {"state": CBState.OPEN}},
),
],
indirect=True,
)
def test_execute_pipeline_against_correct_db_and_closed_circuit(
self, mock_multi_db_config, mock_db, mock_db1, mock_db2, mock_hc
):
databases = create_weighted_list(mock_db, mock_db1, mock_db2)
with (
patch.object(mock_multi_db_config, "databases", return_value=databases),
patch.object(
mock_multi_db_config, "default_health_checks", return_value=[mock_hc]
),
):
pipe = mock_pipe()
pipe.execute.return_value = ["OK1", "value1"]
mock_db1.client.pipeline.return_value = pipe
def mock_check_health(database):
if database == mock_db2:
return False
else:
return True
mock_hc.check_health.side_effect = mock_check_health
client = MultiDBClient(mock_multi_db_config)
try:
assert (
mock_multi_db_config.failover_strategy.set_databases.call_count == 1
)
with client.pipeline() as pipe:
pipe.set("key1", "value1")
pipe.get("key1")
assert pipe.execute() == ["OK1", "value1"]
assert mock_db.circuit.state == CBState.CLOSED
assert mock_db1.circuit.state == CBState.CLOSED
assert mock_db2.circuit.state == CBState.OPEN
finally:
client.close()
@pytest.mark.parametrize(
"mock_multi_db_config,mock_db, mock_db1, mock_db2",
[
(
{"health_check_probes": 1},
{"weight": 0.2, "circuit": {"state": CBState.CLOSED}},
{"weight": 0.7, "circuit": {"state": CBState.CLOSED}},
{"weight": 0.5, "circuit": {"state": CBState.CLOSED}},
),
],
indirect=True,
)
def test_execute_pipeline_against_correct_db_on_background_health_check_determine_active_db_unhealthy(
self, mock_multi_db_config, mock_db, mock_db1, mock_db2, mock_hc
):
cb = PBCircuitBreakerAdapter(pybreaker.CircuitBreaker(reset_timeout=5))
cb.database = mock_db
mock_db.circuit = cb
cb1 = PBCircuitBreakerAdapter(pybreaker.CircuitBreaker(reset_timeout=5))
cb1.database = mock_db1
mock_db1.circuit = cb1
cb2 = PBCircuitBreakerAdapter(pybreaker.CircuitBreaker(reset_timeout=5))
cb2.database = mock_db2
mock_db2.circuit = cb2
databases = create_weighted_list(mock_db, mock_db1, mock_db2)
# Track health check runs across all databases
health_check_run = 0
# Create events for each failover scenario
db1_became_unhealthy = threading.Event()
db2_became_unhealthy = threading.Event()
db_became_unhealthy = threading.Event()
counter_lock = threading.Lock()
def mock_check_health(database):
nonlocal health_check_run
# Increment run counter for each health check call
with counter_lock:
health_check_run += 1
current_run = health_check_run
# Run 1 (health_check_run 1-3): All databases healthy
if current_run <= 3:
return True
# Run 2 (health_check_run 4-6): mock_db1 unhealthy, others healthy
elif current_run <= 6:
if database == mock_db1:
if current_run == 6:
db1_became_unhealthy.set()
return False
# Signal that db1 has become unhealthy after all 3 checks
if current_run == 6:
db1_became_unhealthy.set()
return True
# Run 3 (health_check_run 7-9): mock_db1 and mock_db2 unhealthy, mock_db healthy
elif current_run <= 9:
if database == mock_db1 or database == mock_db2:
if current_run == 9:
db2_became_unhealthy.set()
return False
# Signal that db2 has become unhealthy after all 3 checks
if current_run == 9:
db2_became_unhealthy.set()
return True
# Run 4 (health_check_run 10-12): mock_db unhealthy, others healthy
else:
if database == mock_db:
if current_run >= 12:
db_became_unhealthy.set()
return False
# Signal that db has become unhealthy after all 3 checks
if current_run >= 12:
db_became_unhealthy.set()
return True
mock_hc.check_health.side_effect = mock_check_health
with (
patch.object(mock_multi_db_config, "databases", return_value=databases),
patch.object(
mock_multi_db_config,
"default_health_checks",
return_value=[mock_hc],
),
):
pipe = mock_pipe()
pipe.execute.return_value = ["OK", "value"]
mock_db.client.pipeline.return_value = pipe
pipe1 = mock_pipe()
pipe1.execute.return_value = ["OK1", "value"]
mock_db1.client.pipeline.return_value = pipe1
pipe2 = mock_pipe()
pipe2.execute.return_value = ["OK2", "value"]
mock_db2.client.pipeline.return_value = pipe2
mock_multi_db_config.health_check_interval = 0.1
mock_multi_db_config.failover_strategy = WeightBasedFailoverStrategy()
client = MultiDBClient(mock_multi_db_config)
try:
with client.pipeline() as pipe:
pipe.set("key1", "value")
pipe.get("key1")
# Run 1: All databases healthy - should use mock_db1 (highest weight 0.7)
assert pipe.execute() == ["OK1", "value"]
# Wait for mock_db1 to become unhealthy
assert db1_became_unhealthy.wait(timeout=1.0), (
"Timeout waiting for mock_db1 to become unhealthy"
)
wait_for_condition(
lambda: cb1.state == CBState.OPEN,
timeout=0.2,
error_message="Timeout waiting for cb1 to open",
)
# Run 2: mock_db1 unhealthy - should failover to mock_db2 (weight 0.5)
assert pipe.execute() == ["OK2", "value"]
# Wait for mock_db2 to become unhealthy
assert db2_became_unhealthy.wait(timeout=1.0), (
"Timeout waiting for mock_db2 to become unhealthy"
)
wait_for_condition(
lambda: cb2.state == CBState.OPEN,
timeout=0.2,
error_message="Timeout waiting for cb2 to open",
)
# Run 3: mock_db1 and mock_db2 unhealthy - should use mock_db (weight 0.2)
assert pipe.execute() == ["OK", "value"]
# Wait for mock_db to become unhealthy
assert db_became_unhealthy.wait(timeout=1.0), (
"Timeout waiting for mock_db to become unhealthy"
)
wait_for_condition(
lambda: cb.state == CBState.OPEN,
timeout=0.2,
error_message="Timeout waiting for cb to open",
)
# Run 4: mock_db unhealthy, others healthy - should use mock_db1 (highest weight)
assert pipe.execute() == ["OK1", "value"]
finally:
client.close()
@pytest.mark.onlynoncluster
|
TestPipeline
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/genericType28.py
|
{
"start": 3625,
"end": 3710
}
|
class ____(Variadic_TA2[T]): ...
# This should generate an error.
|
VariadicChild_WithTA2
|
python
|
ray-project__ray
|
rllib/utils/replay_buffers/tests/test_reservoir_buffer.py
|
{
"start": 198,
"end": 4023
}
|
class ____(unittest.TestCase):
def test_timesteps_unit(self):
"""Tests adding, sampling, get-/set state, and eviction with
experiences stored by timesteps."""
self.batch_id = 0
def _add_data_to_buffer(_buffer, batch_size, num_batches=5, **kwargs):
def _generate_data():
return SampleBatch(
{
SampleBatch.T: [np.random.random((4,))],
SampleBatch.ACTIONS: [np.random.choice([0, 1])],
SampleBatch.OBS: [np.random.random((4,))],
SampleBatch.NEXT_OBS: [np.random.random((4,))],
SampleBatch.REWARDS: [np.random.rand()],
SampleBatch.TERMINATEDS: [np.random.choice([False, True])],
SampleBatch.TRUNCATEDS: [np.random.choice([False, True])],
"batch_id": [self.batch_id],
}
)
for i in range(num_batches):
data = [_generate_data() for _ in range(batch_size)]
self.batch_id += 1
batch = concat_samples(data)
_buffer.add(batch, **kwargs)
batch_size = 1
buffer_size = 100
buffer = ReservoirReplayBuffer(capacity=buffer_size)
# Put 1000 batches in a buffer with capacity 100
_add_data_to_buffer(buffer, batch_size=batch_size, num_batches=1000)
# Expect the batch id to be ~500 on average
batch_id_sum = 0
for i in range(200):
num_ts_sampled = np.random.randint(1, 10)
sample = buffer.sample(num_ts_sampled)
batch_id_sum += sum(sample["batch_id"]) / num_ts_sampled
self.assertAlmostEqual(batch_id_sum / 200, 500, delta=100)
def test_episodes_unit(self):
"""Tests adding, sampling, get-/set state, and eviction with
experiences stored by timesteps."""
self.batch_id = 0
def _add_data_to_buffer(_buffer, batch_size, num_batches=5, **kwargs):
def _generate_data():
return SampleBatch(
{
SampleBatch.T: [0, 1],
SampleBatch.ACTIONS: 2 * [np.random.choice([0, 1])],
SampleBatch.REWARDS: 2 * [np.random.rand()],
SampleBatch.OBS: 2 * [np.random.random((4,))],
SampleBatch.NEXT_OBS: 2 * [np.random.random((4,))],
SampleBatch.TERMINATEDS: [False, True],
SampleBatch.TRUNCATEDS: [False, False],
SampleBatch.AGENT_INDEX: 2 * [0],
"batch_id": 2 * [self.batch_id],
}
)
for i in range(num_batches):
data = [_generate_data() for _ in range(batch_size)]
self.batch_id += 1
batch = concat_samples(data)
_buffer.add(batch, **kwargs)
batch_size = 1
buffer_size = 100
buffer = ReservoirReplayBuffer(capacity=buffer_size, storage_unit="fragments")
# Put 1000 batches in a buffer with capacity 100
_add_data_to_buffer(buffer, batch_size=batch_size, num_batches=1000)
# Expect the batch id to be ~500 on average
batch_id_sum = 0
for i in range(200):
num_episodes_sampled = np.random.randint(1, 10)
sample = buffer.sample(num_episodes_sampled)
num_ts_sampled = num_episodes_sampled * 2
batch_id_sum += sum(sample["batch_id"]) / num_ts_sampled
self.assertAlmostEqual(batch_id_sum / 200, 500, delta=100)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
TestReservoirBuffer
|
python
|
OmkarPathak__pygorithm
|
pygorithm/geometry/polygon2.py
|
{
"start": 188,
"end": 22715
}
|
class ____(object):
"""
Define a concave polygon defined by a list of points such that each
adjacent pair of points form a line, the line from the last point to
the first point form a line, and the lines formed from the smaller
index to the larger index will walk clockwise around the polygon.
.. note::
Polygons should be used as if they were completely immutable to
ensure correctness. All attributes of Polygon2 can be reconstructed
from the points array, and thus cannot be changed on their own and
must be recalculated if there were any changes to `points`.
.. note::
To reduce unnecessary recalculations, Polygons notably do not have
an easily modifiable position. However, where relevant, the class
methods will accept offsets to the polygons. In all of these cases
the offset may be None for a minor performance improvement.
.. note::
Unfortunately, operations on rotated polygons require recalculating
the polygon based on its rotated points. This should be avoided
unless necessary through the use of Axis-Aligned Bounding Boxes
and similar tools.
.. caution::
The length of :py:attr:`~pygorithm.geometry.polygon2.Polygon2.normals`
is not necessarily the same as
:py:attr:`~pygorithm.geometry.polygon2.Polygon2.points` or
:py:attr:`~pygorithm.geometry.polygon2.Polygon2.lines`. It is only
guarranteed to have no two vectors that are the same or opposite
directions, and contain either the vector in the same direction or opposite
direction of the normal vector for every line in the polygon.
:ivar points: the ordered list of points on this polygon
:vartype points: list of :class:`pygorithm.geometry.vector2.Vector2`
:ivar lines: the ordered list of lines on this polygon
:vartype lines: list of :class:`pygorithm.geometry.line2.Line2`
:ivar normals: the unordered list of unique normals on this polygon
:vartype normals: list of :class:`pygorithm.geometry.vector2.Vector2`
:ivar center: the center of this polygon when unshifted.
:vartype center: :class:`pygorithm.geometry.vector2.Vector2`
"""
def __init__(self, points, suppress_errors = False):
"""
Create a new polygon from the set of points
.. caution::
A significant amount of calculation is performed when creating
a polygon. These should be reused whenever possible. This cost
can be alleviated somewhat by suppressing certain expensive
sanity checks, but the polygon can behave very unexpectedly
(and potentially without explicit errors) if the errors are
suppressed.
The center of the polygon is calculated as the average of the points.
The lines of the polygon are constructed using line2.
The normals of the lines are calculated using line2.
A simple linear search is done to check for repeated points.
The area is calculated to check for clockwise order using the
`Shoelace Formula <https://en.wikipedia.org/wiki/Shoelace_formula>`
The polygon is proven to be convex by ensuring the cross product of
the line from the point to previous point and point to next point is
positive or 0, for all points.
:param points: the ordered set of points on this polygon
:type points: list of :class:`pygorithm.geometry.vector2.Vector2` or \
list of (:class:`numbers.Number`, :class:`numbers.Number`)
:param suppress_errors: True to not do somewhat expensive sanity checks
:type suppress_errors: bool
:raises ValueError: if there are less than 3 points (not suppressable)
:raises ValueError: if there are any repeated points (suppressable)
:raises ValueError: if the points are not clockwise oriented (suppressable)
:raises ValueError: if the polygon is not convex (suppressable)
"""
if len(points) < 3:
raise ValueError("Not enough points (need at least 3 to define a polygon, got {}".format(len(points)))
self.points = []
self.lines = []
self.normals = []
_sum = vector2.Vector2(0, 0)
for pt in points:
act_pt = pt if type(pt) == vector2.Vector2 else vector2.Vector2(pt)
if not suppress_errors:
for prev_pt in self.points:
if math.isclose(prev_pt.x, act_pt.x) and math.isclose(prev_pt.y, act_pt.y):
raise ValueError('Repeated points! points={} (repeated={})'.format(points, act_pt))
_sum += act_pt
self.points.append(act_pt)
self.center = _sum * (1 / len(self.points))
_previous = self.points[0]
for i in range(1, len(self.points) + 1):
pt = self.points[i % len(self.points)]
_line = line2.Line2(_previous, pt)
self.lines.append(_line)
norm = vector2.Vector2(_line.normal)
if norm.x < 0 or (norm.x == 0 and norm.y == -1):
norm.x *= -1
norm.y *= -1
already_contains = next((v for v in self.normals if math.isclose(v.x, norm.x) and math.isclose(v.y, norm.y)), None)
if already_contains is None:
self.normals.append(norm)
_previous = pt
self._area = None
if not suppress_errors:
# this will check counter-clockwisedness
a = self.area
# if the polygon is convex and clockwise, if you look at any point
# and take the cross product with the line from the point to the
# previous point and the line from the point to the next point
# the result will be positive
for leftpointin in range(len(self.points)):
middlepointin = (leftpointin + 1) % len(self.points)
rightpointin = (middlepointin + 1) % len(self.points)
leftpoint = self.points[leftpointin]
middlepoint = self.points[middlepointin]
rightpoint = self.points[rightpointin]
vec1 = middlepoint - leftpoint
vec2 = middlepoint - rightpoint
cross_product = vec1.cross(vec2)
if cross_product < -1e-09:
raise ValueError('Detected concavity at index {} - {} cross {} = {}\nself={}'.format(middlepointin, vec1, vec2, cross_product, str(self)))
@classmethod
def from_regular(cls, sides, length, start_rads = None, start_degs = None, center = None):
"""
Create a new regular polygon.
.. hint::
If no rotation is specified there is always a point at ``(length, 0)``
If no center is specified, the center will be calculated such that
all the vertexes positive and the bounding box includes (0, 0). This
operation requires O(n) time (where n is the number if sides)
May specify the angle of the first point. For example, if the coordinate
system is x to the right and y upward, then if the starting offset is 0
then the first point will be at the right and the next point counter-clockwise.
This would make for the regular quad (sides=4) to look like a diamond. To make
the bottom side a square, the whole polygon needs to be rotated 45 degrees, like
so:
.. code-block:: python
from pygorithm.geometry import (vector2, polygon2)
import math
# This is a diamond shape (rotated square) (0 degree rotation assumed)
diamond = polygon2.Polygon2.from_regular(4, 1)
# This is a flat square
square = polygon2.Polygon2.from_regular(4, 1, start_degs = 45)
# Creating a flat square with radians
square2 = polygon2.Polygon2.from_regular(4, 1, math.pi / 4)
Uses the `definition of a regular polygon <https://en.wikipedia.org/wiki/Regular_polygon>`
to find the angle between each vertex in the polygon. Then converts the side
length to circumradius using the formula explained `here <http://mathworld.wolfram.com/RegularPolygon.html>`
Finally, each vertex is found using ``<radius * cos(angle), radius * sin(angle)>``
If the center is not specified, the minimum of the bounding box of the
polygon is calculated while the vertices are being found, and the inverse
of that value is offset to the rest of the points in the polygon.
:param sides: the number of sides in the polygon
:type sides: :class:`numbers.Number`
:param length: the length of any side of the polygon
:type length: :class:`numbers.Number`
:param start_rads: the starting radians or None
:type start_rads: :class:`numbers.Number` or None
:param start_degs: the starting degrees or None
:type start_degs: :class:`numbers.Number` or None
:param center: the center of the polygon
:type center: :class:`pygorithm.geometry.vector2.Vector2`
:returns: the new regular polygon
:rtype: :class:`pygorithm.geometry.polygon2.Polygon2`
:raises ValueError: if ``sides < 3`` or ``length <= 0``
:raises ValueError: if ``start_rads is not None and start_degs is not None``
"""
if (start_rads is not None) and (start_degs is not None):
raise ValueError('One or neithter of start_rads and start_degs may be defined, but not both. (got start_rads={}, start_degs={})'.format(start_rads, start_degs))
if sides < 3 or length <= 0:
raise ValueError('Too few sides or too non-positive length (sides={}, length={})'.format(sides, length))
if start_degs is not None:
start_rads = (start_degs * math.pi) / 180
if start_rads is None:
start_rads = 0
_recenter = False
radius = length / (2 * math.sin( math.pi / sides ))
if center is None:
_recenter = True
center = vector2.Vector2(0, 0)
angle = start_rads
increment = -(math.pi * 2) / sides
pts = []
_minx = 0
_miny = 0
for i in range(sides):
x = center.x + math.cos(angle) * radius
y = center.y + math.sin(angle) * radius
pts.append(vector2.Vector2(x, y))
angle += increment
if _recenter:
_minx = min(_minx, x)
_miny = min(_miny, y)
if _recenter:
_offset = vector2.Vector2(-_minx, -_miny)
for i in range(sides):
pts[i] += _offset
return cls(pts, suppress_errors = True)
@classmethod
def from_rotated(cls, original, rotation, rotation_degrees = None):
"""
Create a regular polygon that is a rotation of
a different polygon.
The rotation must be in radians, or null and rotation_degrees
must be specified. Positive rotations are clockwise.
Examples:
.. code-block:: python
from pygorithm.goemetry import (vector2, polygon2)
import math
poly = polygon2.Polygon2.from_regular(4, 1)
# the following are equivalent (within rounding)
rotated1 = polygon2.Polygon2.from_rotated(poly, math.pi / 4)
rotated2 = polygon2.Polygon2.from_rotated(poly, None, 45)
Uses the `2-d rotation matrix <https://en.wikipedia.org/wiki/Rotation_matrix>`
to rotate each point.
:param original: the polygon to rotate
:type original: :class:`pygorithm.geometry.polygon2.Polygon2`
:param rotation: the rotation in radians or None
:type rotation: :class:`numbers.Number`
:param rotation_degrees: the rotation in degrees or None
:type rotation_degrees: :class:`numbers.Number`
:returns: the rotated polygon
:rtype: :class:`pygorithm.geometry.polygon2.Polygon2`
:raises ValueError: if ``rotation is not None and rotation_degrees is not None``
:raises ValueError: if ``rotation is None and rotation_degrees is None``
"""
if (rotation is None) == (rotation_degrees is None):
raise ValueError("rotation must be specified exactly once (rotation={}, rotation_degrees={})".format(rotation, rotation_degrees))
if rotation_degrees is not None:
rotation = rotation_degrees * math.pi / 180
new_pts = []
for pt in original.points:
shifted = pt - original.center
new_pts.append(vector2.Vector2(original.center.x + shifted.x * math.cos(rotation) - shifted.y * math.sin(rotation),
original.center.y + shifted.y * math.cos(rotation) + shifted.x * math.sin(rotation)))
result = cls(new_pts, suppress_errors = True)
result._area = original._area
return result
@property
def area(self):
"""
Get the area of this polygon. Lazily initialized.
Uses the `Shoelace Formula <https://en.wikipedia.org/wiki/Shoelace_formula>` to
calculate the signed area, allowing this to also test for correct polygon
orientation.
:returns: area of this polygon
:rtype: :class:`numbers.Number`
:raises ValueError: if the polygon is not in clockwise order
"""
if self._area is None:
_edgesum = 0
_previous = self.points[0]
for i in range(1, len(self.points) + 1):
pt = self.points[i % len(self.points)]
_edgesum += (pt.x - _previous.x) * (pt.y + _previous.y)
_previous = pt
if _edgesum < 0:
raise ValueError("Points are counter-clockwise oriented (signed square area: {})".format(_edgesum))
self._area = _edgesum / 2
return self._area
@staticmethod
def project_onto_axis(polygon, offset, axis):
"""
Find the projection of the polygon along the axis.
Uses the `dot product <https://en.wikipedia.org/wiki/Dot_product>`
of each point on the polygon to project those points onto the axis,
and then finds the extremes of the projection.
:param polygon: the polygon to project
:type polygon: :class:`pygorithm.geometry.polygon2.Polygon2`
:param offset: the offset of the polygon
:type offset: :class:`pygorithm.geometry.vector2.Vector2`
:param axis: the axis to project onto
:type axis: :class:`pygorithm.geometry.vector2.Vector2`
:returns: the projection of the polygon along the axis
:rtype: :class:`pygorithm.geometry.axisall.AxisAlignedLine`
"""
dot_min = None
dot_max = None
for pt in polygon.points:
dot = (pt + offset).dot(axis)
dot_min = min(dot, dot_min) if dot_min is not None else dot
dot_max = max(dot, dot_max) if dot_max is not None else dot
return axisall.AxisAlignedLine(axis, dot_min, dot_max)
@staticmethod
def contains_point(polygon, offset, point):
"""
Determine if the polygon at offset contains point.
Distinguish between points that are on the edge of the polygon and
points that are completely contained by the polygon.
.. tip::
This can never return True, True
This finds the cross product of this point and the two points comprising
every line on this polygon. If any are 0, this is an edge. Otherwise,
they must all be negative (when traversed clockwise).
:param polygon: the polygon
:type polygon: :class:`pygorithm.geometry.polygon2.Polygon2`
:param offset: the offset of the polygon
:type offset: :class:`pygorithm.geometry.vector2.Vector2` or None
:param point: the point to check
:type point: :class:`pygorithm.geometry.vector2.Vector2`
:returns: on edge, contained
:rtype: bool, bool
"""
_previous = polygon.points[0]
for i in range(1, len(polygon.points) + 1):
curr = polygon.points[i % len(polygon.points)]
vec1 = _previous + offset - point
vec2 = curr + offset - point
cross = vec1.cross(vec2)
_previous = curr
if math.isclose(cross, 0, abs_tol=1e-07):
return True, False
if cross > 0:
return False, False
return False, True
@staticmethod
def find_intersection(poly1, poly2, offset1, offset2, find_mtv = True):
"""
Find if the polygons are intersecting and how to resolve it.
Distinguish between polygons that are sharing 1 point or a single line
(touching) as opposed to polygons that are sharing a 2-dimensional
amount of space.
The resulting MTV should be applied to the first polygon (or its offset),
or its negation can be applied to the second polygon (or its offset).
The MTV will be non-null if overlapping is True and find_mtv is True.
.. note::
There is only a minor performance improvement from setting find_mtv to
False. It is rarely an improvement to first check without finding
mtv and then to find the mtv.
.. caution::
The first value in the mtv could be negative (used to inverse the direction
of the axis)
This uses the `Seperating Axis Theorem <http://www.dyn4j.org/2010/01/sat/> to
calculate intersection.
:param poly1: the first polygon
:type poly1: :class:`pygorithm.geometry.polygon2.Polygon2`
:param poly2: the second polygon
:type poly2: :class:`pygorithm.geometry.polygon2.Polygon2`
:param offset1: the offset of the first polygon
:type offset1: :class:`pygorithm.geometry.vector2.Vector2` or None
:param offset2: the offset of the second polygon
:type offset2: :class:`pygorithm.geometry.vector2.Vector2` or None
:param find_mtv: if False, the mtv is always None and there is a small \
performance improvement
:type find_mtv: bool
:returns: (touching, overlapping, (mtv distance, mtv axis))
:rtype: (bool, bool, (:class:`numbers.Number`, :class:`pygorithm.geometry.vector2.Vector2`) or None)
"""
unique_normals = list(poly1.normals)
for n in poly2.normals:
found = False
for old_n in poly1.normals:
if math.isclose(n.x, old_n.x) and math.isclose(n.y, old_n.y):
found = True
break
if not found:
unique_normals.append(n)
not_overlapping = False
best_mtv = None
for norm in unique_normals:
proj1 = Polygon2.project_onto_axis(poly1, offset1, norm)
proj2 = Polygon2.project_onto_axis(poly2, offset2, norm)
touch, mtv = axisall.AxisAlignedLine.find_intersection(proj1, proj2)
if not touch:
return False, False, None
if mtv[0] is None:
not_overlapping = True
best_mtv = None
elif find_mtv and not not_overlapping:
if best_mtv is None or abs(mtv[0]) < abs(best_mtv[0]):
best_mtv = (mtv[0], norm)
if not_overlapping:
return True, False, None
else:
return False, True, best_mtv
@staticmethod
def _create_link(pts):
"""
Create a webmath link to display the polygon.
This isn't a perfect drawing since it doesn't show connections (so order is
invisible). Avoid programatically connecting to the website. This is mostly
used because it's very difficult to visualize polygons from lists of points.
:param pts: a set of points (order, number, etc. are irrelevant)
:type pts: list of :class:`pygorithm.geometry.vector2.Vector2`
"""
param0 = "+".join(('%28{}%2C+{}%29'.format(round(v.x, 3), round(v.y, 3))) for v in pts)
xmin = pts[0].x
xmax = xmin
ymin = pts[1].y
ymax = ymin
for v in pts:
xmin = min(xmin, v.x)
xmax = max(xmax, v.x)
ymin = min(ymin, v.y)
ymax = max(ymax, v.y)
return "www.webmath.com/cgi-bin/grapher.cgi?param0={}&xmin={}&xmax={}&ymin={}&ymax={}&to_plot=points".format(param0, xmin-5, xmax+5, ymin-5, ymax+5)
def __repr__(self):
"""
Creates an unambiguous representation of this polygon, only
showing the list of points.
:returns: unambiguous representation of this polygon
:rtype: string
"""
return "polygon2(points={})".format(self.points)
def __str__(self):
"""
Creates a human-readable representation of this polygon and
includes a link to visualize it
:returns: human-readable representation
:rtype: string
"""
return "polygon2(points={}, view={})".format(', '.join(str(p) for p in self.points), Polygon2._create_link(self.points))
|
Polygon2
|
python
|
django__django
|
tests/i18n/tests.py
|
{
"start": 18271,
"end": 19753
}
|
class ____(SimpleTestCase):
def setUp(self):
"""Clear translation state."""
self._old_language = get_language()
self._old_translations = trans_real._translations
deactivate()
trans_real._translations = {}
def tearDown(self):
trans_real._translations = self._old_translations
activate(self._old_language)
@override_settings(
USE_I18N=True,
LANGUAGE_CODE="en",
LANGUAGES=[
("en", "English"),
("en-ca", "English (Canada)"),
("en-nz", "English (New Zealand)"),
("en-au", "English (Australia)"),
],
LOCALE_PATHS=[os.path.join(here, "loading")],
INSTALLED_APPS=["i18n.loading_app"],
)
def test_translation_loading(self):
"""
"loading_app" does not have translations for all languages provided by
"loading". Catalogs are merged correctly.
"""
tests = [
("en", "local country person"),
("en_AU", "aussie"),
("en_NZ", "kiwi"),
("en_CA", "canuck"),
]
# Load all relevant translations.
for language, _ in tests:
activate(language)
# Catalogs are merged correctly.
for language, nickname in tests:
with self.subTest(language=language):
activate(language)
self.assertEqual(gettext("local country person"), nickname)
|
TranslationLoadingTests
|
python
|
wandb__wandb
|
wandb/sdk/artifacts/_generated/enums.py
|
{
"start": 149,
"end": 246
}
|
class ____(str, Enum):
SEQUENCE = "SEQUENCE"
PORTFOLIO = "PORTFOLIO"
|
ArtifactCollectionType
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_23/datasets.py
|
{
"start": 195848,
"end": 204945
}
|
class ____(Request):
"""
Gets the version tree of a dataset.
:param start_from: Dataset ID
:type start_from: str
:param dataset: Get versions starting from this time
:type dataset: str
:param only_fields: List of version fields to fetch
:type only_fields: Sequence[str]
:param versions: List of version IDs to fetch
:type versions: Sequence[str]
:param only_published: Return only published version.
:type only_published: bool
:param page: Page number, returns a specific page out of the result list of
datasets.
:type page: int
:param page_size: Page size, specifies the number of results returned in each
page (last page may contain fewer results)
:type page_size: int
:param order_by: List of field names to order by. When search_text is used,
'@text_score' can be used as a field representing the text score of returned
documents. Use '-' prefix to specify descending order. Optional, recommended
when using page. Defaults to [created].
:type order_by: Sequence[str]
:param search_text: Free text search query
:type search_text: str
:param tags: User-defined tags filter. Use '-' for exclusion
:type tags: Sequence[str]
:param system_tags: System tags filter. Use '-' for exclusion
:type system_tags: Sequence[str]
"""
_service = "datasets"
_action = "get_versions"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"dataset": {
"description": "Get versions starting from this time",
"type": "string",
},
"only_fields": {
"description": "List of version fields to fetch",
"items": {"type": "string"},
"type": "array",
},
"only_published": {
"default": True,
"description": "Return only published version.",
"type": "boolean",
},
"order_by": {
"description": (
"List of field names to order by. When search_text is used, '@text_score' can be used as a field"
" representing the text score of returned documents. Use '-' prefix to specify descending order."
" Optional, recommended when using page. Defaults to [created]."
),
"items": {"type": "string"},
"type": "array",
},
"page": {
"description": "Page number, returns a specific page out of the result list of datasets.",
"minimum": 0,
"type": "integer",
},
"page_size": {
"description": (
"Page size, specifies the number of results returned in each page (last page may contain fewer "
"results)"
),
"minimum": 1,
"type": "integer",
},
"search_text": {"description": "Free text search query", "type": "string"},
"start_from": {"description": "Dataset ID", "type": ["string", "null"]},
"system_tags": {
"description": "System tags filter. Use '-' for exclusion",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags filter. Use '-' for exclusion",
"items": {"type": "string"},
"type": "array",
},
"versions": {
"description": "List of version IDs to fetch",
"items": {"type": "string"},
"type": "array",
},
},
"required": ["dataset"],
"type": "object",
}
def __init__(
self,
dataset,
start_from=None,
only_fields=None,
versions=None,
only_published=True,
page=None,
page_size=None,
order_by=None,
search_text=None,
tags=None,
system_tags=None,
**kwargs
):
super(GetVersionsRequest, self).__init__(**kwargs)
self.start_from = start_from
self.dataset = dataset
self.only_fields = only_fields
self.versions = versions
self.only_published = only_published
self.page = page
self.page_size = page_size
self.order_by = order_by
self.search_text = search_text
self.tags = tags
self.system_tags = system_tags
@schema_property("start_from")
def start_from(self):
return self._property_start_from
@start_from.setter
def start_from(self, value):
if value is None:
self._property_start_from = None
return
self.assert_isinstance(value, "start_from", six.string_types)
self._property_start_from = value
@schema_property("dataset")
def dataset(self):
return self._property_dataset
@dataset.setter
def dataset(self, value):
if value is None:
self._property_dataset = None
return
self.assert_isinstance(value, "dataset", six.string_types)
self._property_dataset = value
@schema_property("only_fields")
def only_fields(self):
return self._property_only_fields
@only_fields.setter
def only_fields(self, value):
if value is None:
self._property_only_fields = None
return
self.assert_isinstance(value, "only_fields", (list, tuple))
self.assert_isinstance(value, "only_fields", six.string_types, is_array=True)
self._property_only_fields = value
@schema_property("versions")
def versions(self):
return self._property_versions
@versions.setter
def versions(self, value):
if value is None:
self._property_versions = None
return
self.assert_isinstance(value, "versions", (list, tuple))
self.assert_isinstance(value, "versions", six.string_types, is_array=True)
self._property_versions = value
@schema_property("only_published")
def only_published(self):
return self._property_only_published
@only_published.setter
def only_published(self, value):
if value is None:
self._property_only_published = None
return
self.assert_isinstance(value, "only_published", (bool,))
self._property_only_published = value
@schema_property("page")
def page(self):
return self._property_page
@page.setter
def page(self, value):
if value is None:
self._property_page = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "page", six.integer_types)
self._property_page = value
@schema_property("page_size")
def page_size(self):
return self._property_page_size
@page_size.setter
def page_size(self, value):
if value is None:
self._property_page_size = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "page_size", six.integer_types)
self._property_page_size = value
@schema_property("order_by")
def order_by(self):
return self._property_order_by
@order_by.setter
def order_by(self, value):
if value is None:
self._property_order_by = None
return
self.assert_isinstance(value, "order_by", (list, tuple))
self.assert_isinstance(value, "order_by", six.string_types, is_array=True)
self._property_order_by = value
@schema_property("search_text")
def search_text(self):
return self._property_search_text
@search_text.setter
def search_text(self, value):
if value is None:
self._property_search_text = None
return
self.assert_isinstance(value, "search_text", six.string_types)
self._property_search_text = value
@schema_property("tags")
def tags(self):
return self._property_tags
@tags.setter
def tags(self, value):
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self):
return self._property_system_tags
@system_tags.setter
def system_tags(self, value):
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
|
GetVersionsRequest
|
python
|
getsentry__sentry
|
tests/sentry/api/test_base.py
|
{
"start": 1540,
"end": 1702
}
|
class ____(Endpoint):
permission_classes: tuple[type[BasePermission], ...] = ()
def get(self, request):
return Response({"ok": True})
|
DummyEndpoint
|
python
|
python-excel__xlwt
|
xlwt/antlr.py
|
{
"start": 31173,
"end": 33621
}
|
class ____(TokenStreamBasicFilter):
def __init__(self,input):
TokenStreamBasicFilter.__init__(self,input)
self.hideMask = BitSet()
self.nextMonitoredToken = None
self.lastHiddenToken = None
self.firstHidden = None
def consume(self):
self.nextMonitoredToken = self.input.nextToken()
def consumeFirst(self):
self.consume()
p = None;
while self.hideMask.member(self.LA(1).getType()) or \
self.discardMask.member(self.LA(1).getType()):
if self.hideMask.member(self.LA(1).getType()):
if not p:
p = self.LA(1)
else:
p.setHiddenAfter(self.LA(1))
self.LA(1).setHiddenBefore(p)
p = self.LA(1)
self.lastHiddenToken = p
if not self.firstHidden:
self.firstHidden = p
self.consume()
def getDiscardMask(self):
return self.discardMask
def getHiddenAfter(self,t):
return t.getHiddenAfter()
def getHiddenBefore(self,t):
return t.getHiddenBefore()
def getHideMask(self):
return self.hideMask
def getInitialHiddenToken(self):
return self.firstHidden
def hide(self,m):
if isinstance(m,int):
self.hideMask.add(m)
return
if isinstance(m.BitMask):
self.hideMask = m
return
def LA(self,i):
return self.nextMonitoredToken
def nextToken(self):
if not self.LA(1):
self.consumeFirst()
monitored = self.LA(1)
monitored.setHiddenBefore(self.lastHiddenToken)
self.lastHiddenToken = None
self.consume()
p = monitored
while self.hideMask.member(self.LA(1).getType()) or \
self.discardMask.member(self.LA(1).getType()):
if self.hideMask.member(self.LA(1).getType()):
p.setHiddenAfter(self.LA(1))
if p != monitored:
self.LA(1).setHiddenBefore(p)
p = self.lastHiddenToken = self.LA(1)
self.consume()
return monitored
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### StringBuffer ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
|
TokenStreamHiddenTokenFilter
|
python
|
redis__redis-py
|
redis/asyncio/client.py
|
{
"start": 29415,
"end": 32218
}
|
class ____:
"""
Monitor is useful for handling the MONITOR command to the redis server.
next_command() method returns one command from monitor
listen() method yields commands from monitor.
"""
monitor_re = re.compile(r"\[(\d+) (.*?)\] (.*)")
command_re = re.compile(r'"(.*?)(?<!\\)"')
def __init__(self, connection_pool: ConnectionPool):
self.connection_pool = connection_pool
self.connection: Optional[Connection] = None
async def connect(self):
if self.connection is None:
self.connection = await self.connection_pool.get_connection()
async def __aenter__(self):
await self.connect()
await self.connection.send_command("MONITOR")
# check that monitor returns 'OK', but don't return it to user
response = await self.connection.read_response()
if not bool_ok(response):
raise RedisError(f"MONITOR failed: {response}")
return self
async def __aexit__(self, *args):
await self.connection.disconnect()
await self.connection_pool.release(self.connection)
async def next_command(self) -> MonitorCommandInfo:
"""Parse the response from a monitor command"""
await self.connect()
response = await self.connection.read_response()
if isinstance(response, bytes):
response = self.connection.encoder.decode(response, force=True)
command_time, command_data = response.split(" ", 1)
m = self.monitor_re.match(command_data)
db_id, client_info, command = m.groups()
command = " ".join(self.command_re.findall(command))
# Redis escapes double quotes because each piece of the command
# string is surrounded by double quotes. We don't have that
# requirement so remove the escaping and leave the quote.
command = command.replace('\\"', '"')
if client_info == "lua":
client_address = "lua"
client_port = ""
client_type = "lua"
elif client_info.startswith("unix"):
client_address = "unix"
client_port = client_info[5:]
client_type = "unix"
else:
# use rsplit as ipv6 addresses contain colons
client_address, client_port = client_info.rsplit(":", 1)
client_type = "tcp"
return {
"time": float(command_time),
"db": int(db_id),
"client_address": client_address,
"client_port": client_port,
"client_type": client_type,
"command": command,
}
async def listen(self) -> AsyncIterator[MonitorCommandInfo]:
"""Listen for commands coming to the server."""
while True:
yield await self.next_command()
|
Monitor
|
python
|
spack__spack
|
lib/spack/spack/llnl/util/argparsewriter.py
|
{
"start": 293,
"end": 1694
}
|
class ____:
"""Parsed representation of a command from argparse.
This is a single command from an argparse parser. ``ArgparseWriter`` creates these and returns
them from ``parse()``, and it passes one of these to each call to ``format()`` so that we can
take an action for a single command.
"""
def __init__(
self,
prog: str,
description: Optional[str],
usage: str,
positionals: List[Tuple[str, Optional[Iterable[Any]], Union[int, str, None], str]],
optionals: List[Tuple[Sequence[str], List[str], str, Union[int, str, None], str]],
subcommands: List[Tuple[ArgumentParser, str, str]],
) -> None:
"""Initialize a new Command instance.
Args:
prog: Program name.
description: Command description.
usage: Command usage.
positionals: List of positional arguments.
optionals: List of optional arguments.
subcommands: List of subcommand parsers.
"""
self.prog = prog
self.description = description
self.usage = usage
self.positionals = positionals
self.optionals = optionals
self.subcommands = subcommands
# NOTE: The only reason we subclass argparse.HelpFormatter is to get access to self._expand_help(),
# ArgparseWriter is not intended to be used as a formatter_class.
|
Command
|
python
|
apache__airflow
|
airflow-core/src/airflow/models/hitl.py
|
{
"start": 4298,
"end": 6352
}
|
class ____(Base, HITLDetailPropertyMixin):
"""Human-in-the-loop request and corresponding response."""
__tablename__ = "hitl_detail"
ti_id: Mapped[str] = mapped_column(
String(36).with_variant(postgresql.UUID(as_uuid=False), "postgresql"),
primary_key=True,
nullable=False,
)
# User Request Detail
options: Mapped[dict] = mapped_column(sqlalchemy_jsonfield.JSONField(json=json), nullable=False)
subject: Mapped[str] = mapped_column(Text, nullable=False)
body: Mapped[str | None] = mapped_column(Text, nullable=True)
defaults: Mapped[dict | None] = mapped_column(sqlalchemy_jsonfield.JSONField(json=json), nullable=True)
multiple: Mapped[bool | None] = mapped_column(Boolean, unique=False, default=False, nullable=True)
params: Mapped[dict] = mapped_column(
sqlalchemy_jsonfield.JSONField(json=json), nullable=False, default={}
)
assignees: Mapped[list[dict[str, str]] | None] = mapped_column(
sqlalchemy_jsonfield.JSONField(json=json), nullable=True
)
created_at: Mapped[datetime] = mapped_column(UtcDateTime, default=timezone.utcnow, nullable=False)
# Response Content Detail
responded_at: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True)
responded_by: Mapped[dict | None] = mapped_column(
sqlalchemy_jsonfield.JSONField(json=json), nullable=True
)
chosen_options: Mapped[list[str] | None] = mapped_column(
sqlalchemy_jsonfield.JSONField(json=json),
nullable=True,
default=None,
)
params_input: Mapped[dict] = mapped_column(
sqlalchemy_jsonfield.JSONField(json=json), nullable=False, default={}
)
task_instance = relationship(
"TaskInstance",
lazy="joined",
back_populates="hitl_detail",
)
__table_args__ = (
ForeignKeyConstraint(
(ti_id,),
["task_instance.id"],
name="hitl_detail_ti_fkey",
ondelete="CASCADE",
onupdate="CASCADE",
),
)
|
HITLDetail
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/run_request.py
|
{
"start": 2595,
"end": 11117
}
|
class ____(IHaveNew, LegacyNamedTupleMixin):
"""Represents all the information required to launch a single run. Must be returned by a
SensorDefinition or ScheduleDefinition's evaluation function for a run to be launched.
Args:
run_key (Optional[str]): A string key to identify this launched run. For sensors, ensures that
only one run is created per run key across all sensor evaluations. For schedules,
ensures that one run is created per tick, across failure recoveries. Passing in a `None`
value means that a run will always be launched per evaluation.
run_config (Optional[Union[RunConfig, Mapping[str, Any]]]: Configuration for the run. If the job has
a :py:class:`PartitionedConfig`, this value will override replace the config
provided by it.
tags (Optional[Dict[str, Any]]): A dictionary of tags (string key-value pairs) to attach
to the launched run.
job_name (Optional[str]): The name of the job this run request will launch.
Required for sensors that target multiple jobs.
asset_selection (Optional[Sequence[AssetKey]]): A subselection of assets that should be
launched with this run. If the sensor or schedule targets a job, then by default a
RunRequest returned from it will launch all of the assets in the job. If the sensor
targets an asset selection, then by default a RunRequest returned from it will launch
all the assets in the selection. This argument is used to specify that only a subset of
these assets should be launched, instead of all of them.
asset_check_keys (Optional[Sequence[AssetCheckKey]]): A subselection of asset checks that
should be launched with this run. If the sensor/schedule targets a job, then by default a
RunRequest returned from it will launch all of the asset checks in the job. If the
sensor/schedule targets an asset selection, then by default a RunRequest returned from it
will launch all the asset checks in the selection. This argument is used to specify that
only a subset of these asset checks should be launched, instead of all of them.
stale_assets_only (bool): Set to true to further narrow the asset
selection to stale assets. If passed without an asset selection, all stale assets in the
job will be materialized. If the job does not materialize assets, this flag is ignored.
partition_key (Optional[str]): The partition key for this run request.
"""
run_key: Optional[str]
run_config: Mapping[str, Any]
tags: Mapping[str, str]
job_name: Optional[str]
asset_selection: Optional[Sequence[AssetKey]]
stale_assets_only: bool
partition_key: Optional[str]
asset_check_keys: Optional[Sequence[AssetCheckKey]]
asset_graph_subset: Optional[AssetGraphSubset]
def __new__(
cls,
run_key: Optional[str] = None,
run_config: Optional[Union["RunConfig", Mapping[str, Any]]] = None,
tags: Optional[Mapping[str, Any]] = None,
job_name: Optional[str] = None,
asset_selection: Optional[Sequence[AssetKey]] = None,
stale_assets_only: bool = False,
partition_key: Optional[str] = None,
asset_check_keys: Optional[Sequence[AssetCheckKey]] = None,
**kwargs,
):
from dagster._core.definitions.run_config import convert_config_input
if kwargs.get("asset_graph_subset") is not None:
# asset_graph_subset is only passed if you use the RunRequest.for_asset_graph_subset helper
# constructor, so we assume that no other parameters were passed.
return super().__new__(
cls,
run_key=None,
run_config={},
tags=normalize_tags(tags),
job_name=None,
asset_selection=None,
stale_assets_only=False,
partition_key=None,
asset_check_keys=None,
asset_graph_subset=check.inst_param(
kwargs["asset_graph_subset"], "asset_graph_subset", AssetGraphSubset
),
)
return super().__new__(
cls,
run_key=run_key,
run_config=convert_config_input(run_config) or {},
tags=normalize_tags(tags),
job_name=job_name,
asset_selection=asset_selection,
stale_assets_only=stale_assets_only,
partition_key=partition_key,
asset_check_keys=asset_check_keys,
asset_graph_subset=None,
)
@classmethod
def for_asset_graph_subset(
cls,
asset_graph_subset: AssetGraphSubset,
tags: Optional[Mapping[str, str]],
) -> "RunRequest":
"""Constructs a RunRequest from an AssetGraphSubset. When processed by the sensor
daemon, this will launch a backfill instead of a run.
Note: This constructor is intentionally left private since AssetGraphSubset is not part of the
public API. Other constructor methods will be public.
"""
return RunRequest(tags=tags, asset_graph_subset=asset_graph_subset)
def with_replaced_attrs(self, **kwargs: Any) -> "RunRequest":
fields = self._asdict()
for k in fields.keys():
if k in kwargs:
fields[k] = kwargs[k] # pyright: ignore[reportIndexIssue]
return RunRequest(**fields)
def with_resolved_tags_and_config(
self,
target_definition: "JobDefinition",
dynamic_partitions_requests: Sequence[
Union[AddDynamicPartitionsRequest, DeleteDynamicPartitionsRequest]
],
dynamic_partitions_store: Optional["DynamicPartitionsStore"],
) -> "RunRequest":
from dagster._core.instance.types import DynamicPartitionsStoreAfterRequests
if self.partition_key is None:
check.failed(
"Cannot resolve partition for run request without partition key",
)
dynamic_partitions_store_after_requests = (
DynamicPartitionsStoreAfterRequests.from_requests(
dynamic_partitions_store, dynamic_partitions_requests
)
if dynamic_partitions_store
else None
)
with partition_loading_context(
dynamic_partitions_store=dynamic_partitions_store_after_requests
) as ctx:
context = ctx
target_definition.validate_partition_key(
self.partition_key, selected_asset_keys=self.asset_selection, context=context
)
tags = {
**(self.tags or {}),
**target_definition.get_tags_for_partition_key(
self.partition_key, selected_asset_keys=self.asset_selection
),
}
return self.with_replaced_attrs(
run_config=(
self.run_config
if self.run_config
else target_definition.get_run_config_for_partition_key(self.partition_key)
),
tags=tags,
)
def has_resolved_partition(self) -> bool:
# Backcompat run requests yielded via `run_request_for_partition` already have resolved
# partitioning
return self.tags.get(PARTITION_NAME_TAG) is not None if self.partition_key else True
@property
def partition_key_range(self) -> Optional[PartitionKeyRange]:
if (
ASSET_PARTITION_RANGE_START_TAG in self.tags
and ASSET_PARTITION_RANGE_END_TAG in self.tags
):
return PartitionKeyRange(
self.tags[ASSET_PARTITION_RANGE_START_TAG], self.tags[ASSET_PARTITION_RANGE_END_TAG]
)
else:
return None
@property
def entity_keys(self) -> Sequence[EntityKey]:
return [*(self.asset_selection or []), *(self.asset_check_keys or [])]
def requires_backfill_daemon(self) -> bool:
"""For now we always send RunRequests with an asset_graph_subset to the backfill daemon, but
eventaully we will want to introspect on the asset_graph_subset to determine if we can
execute it as a single run instead.
"""
return self.asset_graph_subset is not None
@whitelist_for_serdes(
storage_name="PipelineRunReaction",
storage_field_names={
"dagster_run": "pipeline_run",
},
)
|
RunRequest
|
python
|
ansible__ansible
|
lib/ansible/module_utils/_internal/_concurrent/_daemon_threading.py
|
{
"start": 321,
"end": 1085
}
|
class ____(_threading.Thread):
"""
Daemon-only Thread subclass; prevents running threads of this type from blocking interpreter shutdown and process exit.
The join() method is a no-op.
"""
def __init__(self, *args, daemon: bool | None = None, **kwargs) -> None:
super().__init__(*args, daemon=daemon or True, **kwargs)
def join(self, timeout=None) -> None:
"""ThreadPoolExecutor's atexit handler joins all queue threads before allowing shutdown; prevent them from blocking."""
Thread = _DaemonThread # shadow the real Thread attr with our _DaemonThread
def __getattr__(name: str) -> _t.Any:
"""Delegate anything not defined locally to the real `threading` module."""
return getattr(_threading, name)
|
_DaemonThread
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.