language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
PrefectHQ__prefect
|
src/prefect/client/schemas/actions.py
|
{
"start": 16486,
"end": 16638
}
|
class ____(ActionBaseModel):
"""Data used by the Prefect REST API to update a task run"""
name: Optional[str] = Field(default=None)
|
TaskRunUpdate
|
python
|
coleifer__peewee
|
tests/sqlite.py
|
{
"start": 52532,
"end": 61579
}
|
class ____(BaseFTSTestCase, ModelTestCase):
database = database
requires = [FTS5Test]
test_corpus = (
('foo aa bb', 'aa bb cc ' * 10, 1),
('bar bb cc', 'bb cc dd ' * 9, 2),
('baze cc dd', 'cc dd ee ' * 8, 3),
('nug aa dd', 'bb cc ' * 7, 4))
def setUp(self):
super(TestFTS5, self).setUp()
for title, data, misc in self.test_corpus:
FTS5Test.create(title=title, data=data, misc=misc)
def test_create_table(self):
query = FTS5Test._schema._create_table()
self.assertSQL(query, (
'CREATE VIRTUAL TABLE IF NOT EXISTS "fts5_test" USING fts5 '
'("title", "data", "misc" UNINDEXED)'), [])
def test_custom_fts5_command(self):
merge_sql = FTS5Test._fts_cmd_sql('merge', rank=4)
self.assertSQL(merge_sql, (
'INSERT INTO "fts5_test" ("fts5_test", "rank") VALUES (?, ?)'),
['merge', 4])
FTS5Test.merge(4) # Runs without error.
FTS5Test.insert_many([{'title': 'k%08d' % i, 'data': 'v%08d' % i}
for i in range(100)]).execute()
FTS5Test.integrity_check(rank=0)
FTS5Test.optimize()
def test_create_table_options(self):
class Test1(FTS5Model):
f1 = SearchField()
f2 = SearchField(unindexed=True)
f3 = SearchField()
class Meta:
database = self.database
options = {
'prefix': (2, 3),
'tokenize': 'porter unicode61',
'content': Post,
'content_rowid': Post.id}
query = Test1._schema._create_table()
self.assertSQL(query, (
'CREATE VIRTUAL TABLE IF NOT EXISTS "test1" USING fts5 ('
'"f1", "f2" UNINDEXED, "f3", '
'content="post", content_rowid="id", '
'prefix=\'2,3\', tokenize="porter unicode61")'), [])
def assertResults(self, query, expected, scores=False, alias='score'):
if scores:
results = [(obj.title, round(getattr(obj, alias), 7))
for obj in query]
else:
results = [obj.title for obj in query]
self.assertEqual(results, expected)
def test_search(self):
query = FTS5Test.search('bb')
self.assertSQL(query, (
'SELECT "t1"."rowid", "t1"."title", "t1"."data", "t1"."misc" '
'FROM "fts5_test" AS "t1" '
'WHERE ("fts5_test" MATCH ?) ORDER BY rank'), ['bb'])
self.assertResults(query, ['nug aa dd', 'foo aa bb', 'bar bb cc'])
self.assertResults(FTS5Test.search('baze OR dd'),
['baze cc dd', 'bar bb cc', 'nug aa dd'])
@requires_models(FTS5Document)
def test_fts_manual(self):
messages = [FTS5Document.create(message=message)
for message in self.messages]
query = (FTS5Document
.select()
.where(FTS5Document.match('believe'))
.order_by(FTS5Document.rowid))
self.assertMessages(query, [0, 3])
query = FTS5Document.search('believe')
self.assertMessages(query, [3, 0])
# Test SQLite's built-in ranking algorithm (bm25). The results should
# be comparable to our user-defined implementation.
query = FTS5Document.search('things', with_score=True)
self.assertEqual([(d.message, round(d.score, 2)) for d in query], [
(self.messages[4], -0.45),
(self.messages[2], -0.37)])
# Another test of bm25 ranking.
query = FTS5Document.search_bm25('believe', with_score=True)
self.assertEqual([(d.message, round(d.score, 2)) for d in query], [
(self.messages[3], -0.49),
(self.messages[0], -0.36)])
query = FTS5Document.search_bm25('god faith', with_score=True)
self.assertEqual([(d.message, round(d.score, 2)) for d in query], [
(self.messages[1], -0.93)])
query = FTS5Document.search_bm25('"it is"', with_score=True)
self.assertEqual([(d.message, round(d.score, 2)) for d in query], [
(self.messages[2], -0.37),
(self.messages[3], -0.37)])
def test_match_column_queries(self):
data = (
('alpha one', 'apple aspires to ace artsy beta launch'),
('beta two', 'beta boasts better broadcast over apple'),
('gamma three', 'gold gray green gamma ray delta data'),
('delta four', 'delta data indicates downturn for apple beta'),
)
FT = FTS5Test
for i, (title, message) in enumerate(data):
FT.create(title=title, data=message, misc=str(i))
def assertQ(expr, idxscore):
q = (FT
.select(FT, FT.bm25().alias('score'))
.where(expr)
.order_by(SQL('score'), FT.misc.cast('int')))
self.assertEqual([(int(r.misc), round(r.score, 2)) for r in q],
idxscore)
# Single whitespace does not affect the mapping of col->term. We can
# also store the column value in quotes if single-quotes are used.
assertQ(FT.match('beta'), [(1, -0.74), (0, -0.57), (3, -0.57)])
assertQ(FT.match('title: beta'), [(1, -2.08)])
assertQ(FT.match('title: ^bet*'), [(1, -2.08)])
assertQ(FT.match('title: "beta"'), [(1, -2.08)])
assertQ(FT.match('"beta"'), [(1, -0.74), (0, -0.57), (3, -0.57)])
# Alternatively, just specify the column explicitly.
assertQ(FT.title.match('beta'), [(1, -2.08)])
assertQ(FT.title.match(' beta '), [(1, -2.08)])
assertQ(FT.title.match('"beta"'), [(1, -2.08)])
assertQ(FT.title.match('^bet*'), [(1, -2.08)])
assertQ(FT.title.match('"^bet*"'), []) # No wildcards in quotes!
# apple beta delta gamma
# 0 | alpha | X X
# 1 | beta | X X
# 2 | gamma | X X
# 3 | delta | X X X
#
assertQ(FT.match('delta NOT gamma'), [(3, -1.53)])
assertQ(FT.match('delta NOT data:gamma'), [(3, -1.53)])
assertQ(FT.match('"delta"'), [(3, -1.53), (2, -1.2)])
assertQ(FT.match('title:delta OR data:delta'), [(3, -3.21), (2, -1.2)])
assertQ(FT.match('"^delta"'), [(3, -1.53), (2, -1.2)]) # Different.
assertQ(FT.match('^delta'), [(3, -2.57)]) # Different from FTS4.
assertQ(FT.match('(delta AND data:apple) OR title:alpha'),
[(3, -2.09), (0, -2.02)])
assertQ(FT.match('(data:delta AND data:apple) OR title:alpha'),
[(0, -2.02), (3, -1.76)])
assertQ(FT.match('data:delta data:apple OR title:alpha'),
[(0, -2.02), (3, -1.76)])
assertQ(FT.match('(data:delta AND data:apple) OR beta'),
[(3, -2.33), (1, -0.74), (0, -0.57)])
assertQ(FT.match('data:delta AND (data:apple OR title:alpha)'),
[(3, -1.76)])
# data apple (0,1,3) OR (...irrelevant...).
assertQ(FT.match('data:apple OR title:alpha NOT delta'),
[(0, -2.58), (1, -0.58), (3, -0.57)])
assertQ(FT.match('data:apple OR (title:alpha NOT data:delta)'),
[(0, -2.58), (1, -0.58), (3, -0.57)])
# data apple OR title alpha (0, 1, 3) AND NOT delta (2, 3) -> (0, 1).
assertQ(FT.match('(data:apple OR title:alpha) NOT delta'),
[(0, -2.58), (1, -0.58)])
def test_highlight_function(self):
query = (FTS5Test
.search('dd')
.select(FTS5Test.title.highlight('[', ']').alias('hi')))
accum = [row.hi for row in query]
self.assertEqual(accum, ['baze cc [dd]', 'bar bb cc', 'nug aa [dd]'])
query = (FTS5Test
.search('bb')
.select(FTS5Test.data.highlight('[', ']').alias('hi')))
accum = [row.hi[:7] for row in query]
self.assertEqual(accum, ['[bb] cc', 'aa [bb]', '[bb] cc'])
def test_snippet_function(self):
snip = FTS5Test.data.snippet('[', ']', max_tokens=5).alias('snip')
query = FTS5Test.search('dd').select(snip)
accum = [row.snip for row in query]
self.assertEqual(accum, [
'cc [dd] ee cc [dd]...',
'bb cc [dd] bb cc...',
'bb cc bb cc bb...'])
def test_clean_query(self):
cases = (
('test', 'test'),
('"test"', '"test"'),
('"test\u2022"', '"test\u2022"'),
('test\u2022', 'test\u2022'),
('test-', 'test\x1a'),
('"test-"', '"test-"'),
('\\"test-', '\x1a test\x1a'),
('--test--', '\x1a\x1atest\x1a\x1a'),
('-test- "-test-"', '\x1atest\x1a "-test-"'),
)
for a, b in cases:
self.assertEqual(FTS5Test.clean_query(a), b)
@skip_unless(CYTHON_EXTENSION, 'requires sqlite c extension')
|
TestFTS5
|
python
|
apache__airflow
|
providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/utils/pod_manager.py
|
{
"start": 8017,
"end": 8116
}
|
class ____(AirflowException):
"""Expected pod does not exist in kube-api."""
|
PodNotFoundException
|
python
|
spyder-ide__spyder
|
spyder/widgets/comboboxes.py
|
{
"start": 4816,
"end": 5879
}
|
class ____(BaseComboBox):
"""Search pattern combo box"""
def __init__(
self,
parent,
items=None,
tip=None,
adjust_to_minimum=True,
id_=None,
items_elide_mode=None,
):
if not PYSIDE2:
super().__init__(parent, items_elide_mode)
else:
BaseComboBox.__init__(self, parent, items_elide_mode)
if adjust_to_minimum:
self.setSizeAdjustPolicy(
QComboBox.AdjustToMinimumContentsLengthWithIcon
)
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
if items is not None:
self.addItems(items)
if tip is not None:
self.setToolTip(tip)
if id_ is not None:
self.ID = id_
# Use a line edit with a clear button inside it.
# Note: The method Qt offers for this (setClearButtonEnabled) adds a
# button whose icon can't be easily stylized.
self.setLineEdit(ClearLineEdit(self, reposition_button=True))
|
PatternComboBox
|
python
|
huggingface__transformers
|
src/transformers/modeling_outputs.py
|
{
"start": 2255,
"end": 3215
}
|
class ____(ModelOutput):
"""
Base class for model's outputs, with potential hidden states.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, num_channels, height, width)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
|
BaseModelOutputWithNoAttention
|
python
|
jazzband__django-redis
|
tests/settings_wrapper.py
|
{
"start": 100,
"end": 1033
}
|
class ____:
def __init__(self) -> None:
self._to_restore: list[override_settings]
object.__setattr__(self, "_to_restore", [])
def __delattr__(self, attr: str) -> None:
from django.test import override_settings
override = override_settings()
override.enable()
from django.conf import settings
delattr(settings, attr)
self._to_restore.append(override)
def __setattr__(self, attr: str, value) -> None:
from django.test import override_settings
override = override_settings(**{attr: value})
override.enable()
self._to_restore.append(override)
def __getattr__(self, attr: str):
from django.conf import settings
return getattr(settings, attr)
def finalize(self) -> None:
for override in reversed(self._to_restore):
override.disable()
del self._to_restore[:]
|
SettingsWrapper
|
python
|
getsentry__sentry
|
src/sentry/integrations/models/repository_project_path_config.py
|
{
"start": 391,
"end": 3397
}
|
class ____(DefaultFieldsModelExisting):
__relocation_scope__ = RelocationScope.Excluded
repository = FlexibleForeignKey("sentry.Repository")
project = FlexibleForeignKey("sentry.Project", db_constraint=False)
organization_integration_id = HybridCloudForeignKey(
"sentry.OrganizationIntegration", on_delete="CASCADE"
)
organization_id = BoundedBigIntegerField(db_index=True)
# From a region point of view, you really only have per organization scoping.
integration_id = BoundedBigIntegerField(db_index=False)
stack_root = models.TextField()
source_root = models.TextField()
default_branch = models.TextField(null=True)
# Indicates if Sentry created this mapping
automatically_generated = models.BooleanField(default=False, db_default=False)
class Meta:
app_label = "sentry"
db_table = "sentry_repositoryprojectpathconfig"
unique_together = (("project", "stack_root"),)
def __repr__(self) -> str:
return (
f"RepositoryProjectPathConfig(repo={self.repository.name}, "
+ f"branch={self.default_branch}, "
+ f"stack_root={self.stack_root}, "
+ f"source_root={self.source_root})"
)
def process_resource_change(instance: RepositoryProjectPathConfig, **kwargs):
from sentry.models.group import Group
from sentry.models.project import Project
from sentry.tasks.codeowners import update_code_owners_schema
from sentry.utils.cache import cache
def _spawn_update_schema_task():
"""
We need to re-apply the updated code mapping against any CODEOWNERS file that uses this mapping.
"""
try:
update_code_owners_schema.apply_async(
kwargs={
"organization": instance.project.organization_id,
"projects": [instance.project_id],
}
)
except Project.DoesNotExist:
pass
def _clear_commit_context_cache():
"""
Once we have a new code mapping for a project, we want to give all groups in the project
a new chance to generate missing suspect commits. We debounce the process_commit_context task
if we cannot find the Suspect Committer from the given code mappings. Thus, need to clear the
cache to reprocess with the new code mapping
"""
group_ids = Group.objects.filter(project_id=instance.project_id).values_list(
"id", flat=True
)
cache_keys = [f"process-commit-context-{group_id}" for group_id in group_ids]
cache.delete_many(cache_keys)
transaction.on_commit(_spawn_update_schema_task, router.db_for_write(type(instance)))
transaction.on_commit(_clear_commit_context_cache, router.db_for_write(type(instance)))
post_save.connect(
lambda instance, **kwargs: process_resource_change(instance, **kwargs),
sender=RepositoryProjectPathConfig,
weak=False,
)
|
RepositoryProjectPathConfig
|
python
|
getsentry__sentry
|
src/sentry/issues/endpoints/organization_group_suspect_flags.py
|
{
"start": 780,
"end": 868
}
|
class ____(TypedDict):
data: list[ResponseDataItem]
@region_silo_endpoint
|
ResponseData
|
python
|
kamyu104__LeetCode-Solutions
|
Python/number-of-ways-to-wear-different-hats-to-each-other.py
|
{
"start": 37,
"end": 744
}
|
class ____(object):
def numberWays(self, hats):
"""
:type hats: List[List[int]]
:rtype: int
"""
MOD = 10**9 + 7
HAT_SIZE = 40
hat_to_people = [[] for _ in xrange(HAT_SIZE)]
for i in xrange(len(hats)):
for h in hats[i]:
hat_to_people[h-1].append(i)
dp = [0]*(1<<len(hats))
dp[0] = 1
for people in hat_to_people:
for mask in reversed(xrange(len(dp))):
for p in people:
if mask & (1<<p):
continue
dp[mask | (1<<p)] += dp[mask]
dp[mask | (1<<p)] %= MOD
return dp[-1]
|
Solution
|
python
|
pdm-project__pdm
|
src/pdm/resolver/resolvelib.py
|
{
"start": 750,
"end": 5863
}
|
class ____(Resolver):
def __post_init__(self) -> None:
super().__post_init__()
if self.locked_repository is None:
self.locked_repository = self.project.get_locked_repository()
supports_env_spec = "env_spec" in inspect.signature(self.project.get_provider).parameters
if supports_env_spec:
provider = self.project.get_provider(
self.update_strategy,
self.tracked_names,
direct_minimal_versions=FLAG_DIRECT_MINIMAL_VERSIONS in self.strategies,
env_spec=self.target,
locked_repository=self.locked_repository,
)
else: # pragma: no cover
provider = self.project.get_provider(
self.update_strategy,
self.tracked_names,
direct_minimal_versions=FLAG_DIRECT_MINIMAL_VERSIONS in self.strategies,
ignore_compatibility=self.target.is_allow_all(),
)
if isinstance(self.reporter, LockReporter):
provider.repository.reporter = self.reporter
self.provider = provider
def resolve(self) -> Resolution:
from pdm.models.repositories import Package
mapping = self._do_resolve()
if self.project.enable_write_lockfile: # type: ignore[has-type]
if isinstance(self.reporter, RichLockReporter):
self.reporter.update(info="Fetching hashes for resolved packages")
self.provider.repository.fetch_hashes(mapping.values())
if not (env_python := PySpecSet(self.target.requires_python)).is_superset(self.environment.python_requires):
python_marker = get_marker(env_python.as_marker_string())
for candidate in mapping.values():
marker = candidate.req.marker or get_marker("")
candidate.req = replace(candidate.req, marker=marker & python_marker)
backend = self.project.backend
packages: list[Package] = []
for candidate in mapping.values():
deps: list[str] = []
for r in self.provider.fetched_dependencies[candidate.dep_key]:
if isinstance(r, FileRequirement) and r.path:
try:
if r.path.is_absolute():
r.path = Path(os.path.normpath(r.path)).relative_to(os.path.normpath(self.project.root))
except ValueError:
pass
else:
r.url = backend.relative_path_to_url(r.path.as_posix())
deps.append(r.as_line())
packages.append(Package(candidate, deps, candidate.summary))
return Resolution(packages, self.requested_groups)
def _do_resolve(self) -> dict[str, Candidate]:
from resolvelib import Resolver as _Resolver
resolver_class = cast("type[_Resolver]", getattr(self.project.core, "resolver_class", _Resolver))
resolver = resolver_class(self.provider, self.reporter)
provider = self.provider
repository = self.provider.repository
target = self.target
python_req = PythonRequirement.from_pyspec_set(PySpecSet(target.requires_python))
requirements: list[Requirement] = [python_req, *self.requirements]
max_rounds = self.project.config["strategy.resolve_max_rounds"]
result = resolver.resolve(requirements, max_rounds)
if repository.has_warnings:
self.project.core.ui.info(
"Use `-q/--quiet` to suppress these warnings, or ignore them per-package with "
r"`ignore_package_warnings` config in \[tool.pdm] table.",
verbosity=termui.Verbosity.NORMAL,
)
mapping = cast(dict[str, Candidate], result.mapping)
mapping.pop("python", None)
local_name = normalize_name(self.project.name) if self.project.is_distribution else None
for key, candidate in list(mapping.items()):
if key is None:
continue
# For source distribution whose name can only be determined after it is built,
# the key in the resolution map and criteria should be updated.
if key.startswith(":empty:"):
new_key = provider.identify(candidate)
mapping[new_key] = mapping.pop(key)
result.criteria[new_key] = result.criteria.pop(key) # type: ignore[attr-defined]
if FLAG_INHERIT_METADATA in self.strategies:
all_markers = merge_markers(result)
populate_groups(result)
else:
all_markers = {}
for key, candidate in list(mapping.items()):
if key in all_markers:
marker = all_markers[key]
if marker.is_empty():
del mapping[key]
continue
candidate.req = replace(candidate.req, marker=None if marker.is_any() else marker)
if not self.keep_self and strip_extras(key)[0] == local_name:
del mapping[key]
return mapping
|
RLResolver
|
python
|
numba__numba
|
numba/core/types/abstract.py
|
{
"start": 10607,
"end": 10773
}
|
class ____(Sequence):
"""
Base class for 1d mutable sequence types. Instances should have the
*dtype* attribute.
"""
mutable = True
|
MutableSequence
|
python
|
kamyu104__LeetCode-Solutions
|
Python/longest-happy-prefix.py
|
{
"start": 639,
"end": 1411
}
|
class ____(object):
def longestPrefix(self, s):
"""
:type s: str
:rtype: str
"""
M = 10**9+7
D = 26
def check(l, s):
for i in xrange(l):
if s[i] != s[len(s)-l+i]:
return False
return True
result, prefix, suffix, power = 0, 0, 0, 1
for i in xrange(len(s)-1):
prefix = (prefix*D + (ord(s[i])-ord('a'))) % M
suffix = (suffix + (ord(s[len(s)-(i+1)])-ord('a'))*power) % M
power = (power*D)%M
if prefix == suffix:
# we assume M is a very large prime without hash collision
# assert(check(i+1, s))
result = i+1
return s[:result]
|
Solution2
|
python
|
sympy__sympy
|
sympy/utilities/matchpy_connector.py
|
{
"start": 5825,
"end": 5942
}
|
class ____(_WildAbstract):
min_length = 1
fixed_size = False
@doctest_depends_on(modules=('matchpy',))
|
WildPlus
|
python
|
Pylons__pyramid
|
tests/test_i18n.py
|
{
"start": 7918,
"end": 8926
}
|
class ____(unittest.TestCase):
def setUp(self):
testing.setUp()
def tearDown(self):
testing.tearDown()
def _callFUT(self, request):
from pyramid.i18n import default_locale_negotiator
return default_locale_negotiator(request)
def test_from_none(self):
request = DummyRequest()
result = self._callFUT(request)
self.assertEqual(result, None)
def test_from_request_attr(self):
request = DummyRequest()
request._LOCALE_ = 'foo'
result = self._callFUT(request)
self.assertEqual(result, 'foo')
def test_from_params(self):
request = DummyRequest()
request.params['_LOCALE_'] = 'foo'
result = self._callFUT(request)
self.assertEqual(result, 'foo')
def test_from_cookies(self):
request = DummyRequest()
request.cookies['_LOCALE_'] = 'foo'
result = self._callFUT(request)
self.assertEqual(result, 'foo')
|
Test_default_locale_negotiator
|
python
|
pypa__virtualenv
|
src/virtualenv/run/plugin/activators.py
|
{
"start": 149,
"end": 2235
}
|
class ____(ComponentBuilder):
def __init__(self, interpreter, parser) -> None:
self.default = None
possible = OrderedDict(
(k, v) for k, v in self.options("virtualenv.activate").items() if v.supports(interpreter)
)
super().__init__(interpreter, parser, "activators", possible)
self.parser.description = "options for activation scripts"
self.active = None
def add_selector_arg_parse(self, name, choices):
self.default = ",".join(choices)
self.parser.add_argument(
f"--{name}",
default=self.default,
metavar="comma_sep_list",
required=False,
help="activators to generate - default is all supported",
type=self._extract_activators,
)
def _extract_activators(self, entered_str):
elements = [e.strip() for e in entered_str.split(",") if e.strip()]
missing = [e for e in elements if e not in self.possible]
if missing:
msg = f"the following activators are not available {','.join(missing)}"
raise ArgumentTypeError(msg)
return elements
def handle_selected_arg_parse(self, options):
selected_activators = (
self._extract_activators(self.default) if options.activators is self.default else options.activators
)
self.active = {k: v for k, v in self.possible.items() if k in selected_activators}
self.parser.add_argument(
"--prompt",
dest="prompt",
metavar="prompt",
help=(
"provides an alternative prompt prefix for this environment "
"(value of . means name of the current working directory)"
),
default=None,
)
for activator in self.active.values():
activator.add_parser_arguments(self.parser, self.interpreter)
def create(self, options):
return [activator_class(options) for activator_class in self.active.values()]
__all__ = [
"ActivationSelector",
]
|
ActivationSelector
|
python
|
cherrypy__cherrypy
|
cherrypy/_cptools.py
|
{
"start": 15104,
"end": 18959
}
|
class ____(object):
"""A collection of Tools.
This object also functions as a config namespace handler for itself.
Custom toolboxes should be added to each Application's toolboxes
dict.
"""
def __init__(self, namespace):
"""Initialize a toolbox instance."""
self.namespace = namespace
def __setattr__(self, name, value):
"""Set an attribute on this :class:`Toolbox` instance."""
# If the Tool._name is None, supply it from the attribute name.
if isinstance(value, Tool):
if value._name is None:
value._name = name
value.namespace = self.namespace
object.__setattr__(self, name, value)
def __enter__(self):
"""Populate request.toolmaps from tools specified in config."""
cherrypy.serving.request.toolmaps[self.namespace] = map = {}
def populate(k, v):
toolname, arg = k.split('.', 1)
bucket = map.setdefault(toolname, {})
bucket[arg] = v
return populate
def __exit__(self, exc_type, exc_val, exc_tb):
"""Run tool._setup() for each tool in our toolmap."""
map = cherrypy.serving.request.toolmaps.get(self.namespace)
if map:
for name, settings in map.items():
if settings.get('on', False):
tool = getattr(self, name)
tool._setup()
def register(self, point, **kwargs):
"""Register a hook point handler in the toolbox.
Return a decorator which registers the function
at the given hook point.
"""
def decorator(func):
attr_name = kwargs.get('name', func.__name__)
tool = Tool(point, func, **kwargs)
setattr(self, attr_name, tool)
return func
return decorator
default_toolbox = _d = Toolbox('tools')
_d.session_auth = SessionAuthTool(cptools.session_auth)
_d.allow = Tool('on_start_resource', cptools.allow)
_d.proxy = Tool('before_request_body', cptools.proxy, priority=30)
_d.response_headers = Tool('on_start_resource', cptools.response_headers)
_d.log_tracebacks = Tool('before_error_response', cptools.log_traceback)
_d.log_headers = Tool('before_error_response', cptools.log_request_headers)
_d.log_hooks = Tool('on_end_request', cptools.log_hooks, priority=100)
_d.err_redirect = ErrorTool(cptools.redirect)
_d.etags = Tool('before_finalize', cptools.validate_etags, priority=75)
_d.decode = Tool('before_request_body', encoding.decode)
# the order of encoding, gzip, caching is important
_d.encode = Tool('before_handler', encoding.ResponseEncoder, priority=70)
_d.gzip = Tool('before_finalize', encoding.gzip, priority=80)
_d.staticdir = HandlerTool(static.staticdir)
_d.staticfile = HandlerTool(static.staticfile)
_d.sessions = SessionTool()
_d.xmlrpc = ErrorTool(_xmlrpc.on_error)
_d.caching = CachingTool('before_handler', _caching.get, 'caching')
_d.expires = Tool('before_finalize', _caching.expires)
_d.ignore_headers = Tool('before_request_body', cptools.ignore_headers)
_d.referer = Tool('before_request_body', cptools.referer)
_d.trailing_slash = Tool('before_handler', cptools.trailing_slash, priority=60)
_d.flatten = Tool('before_finalize', cptools.flatten)
_d.accept = Tool('on_start_resource', cptools.accept)
_d.redirect = Tool('on_start_resource', cptools.redirect)
_d.autovary = Tool('on_start_resource', cptools.autovary, priority=0)
_d.json_in = Tool('before_request_body', jsontools.json_in, priority=30)
_d.json_out = Tool('before_handler', jsontools.json_out, priority=30)
_d.auth_basic = Tool('before_handler', auth_basic.basic_auth, priority=1)
_d.auth_digest = Tool('before_handler', auth_digest.digest_auth, priority=1)
_d.params = Tool('before_handler', cptools.convert_params, priority=15)
del _d, cptools, encoding, static
|
Toolbox
|
python
|
sphinx-doc__sphinx
|
sphinx/directives/other.py
|
{
"start": 7847,
"end": 8299
}
|
class ____(SphinxDirective):
"""Directive to give an explicit tabulary column definition to LaTeX."""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec: ClassVar[OptionSpec] = {}
def run(self) -> list[Node]:
node = addnodes.tabular_col_spec()
node['spec'] = self.arguments[0]
self.set_source_info(node)
return [node]
|
TabularColumns
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_pie/PIE796.py
|
{
"start": 810,
"end": 922
}
|
class ____(enum.Enum):
A = ...
B = ... # PIE796
C = ... # PIE796
from typing import cast
|
FakeEnum10
|
python
|
readthedocs__readthedocs.org
|
readthedocs/api/v2/adapters.py
|
{
"start": 704,
"end": 788
}
|
class ____(TimeoutAdapter, HostHeaderSSLAdapter):
pass
|
TimeoutHostHeaderSSLAdapter
|
python
|
h5py__h5py
|
h5py/tests/test_file.py
|
{
"start": 25059,
"end": 25289
}
|
class ____(TestCase):
"""
Feature: Files can be flushed
"""
def test_flush(self):
""" Flush via .flush method """
fid = File(self.mktemp(), 'w')
fid.flush()
fid.close()
|
TestFlush
|
python
|
pytorch__pytorch
|
torch/backends/mkl/__init__.py
|
{
"start": 186,
"end": 1783
}
|
class ____:
"""
On-demand oneMKL verbosing functionality.
To make it easier to debug performance issues, oneMKL can dump verbose
messages containing execution information like duration while executing
the kernel. The verbosing functionality can be invoked via an environment
variable named `MKL_VERBOSE`. However, this methodology dumps messages in
all steps. Those are a large amount of verbose messages. Moreover, for
investigating the performance issues, generally taking verbose messages
for one single iteration is enough. This on-demand verbosing functionality
makes it possible to control scope for verbose message dumping. In the
following example, verbose messages will be dumped out for the second
inference only.
.. highlight:: python
.. code-block:: python
import torch
model(data)
with torch.backends.mkl.verbose(torch.backends.mkl.VERBOSE_ON):
model(data)
Args:
level: Verbose level
- ``VERBOSE_OFF``: Disable verbosing
- ``VERBOSE_ON``: Enable verbosing
"""
def __init__(self, enable):
self.enable = enable
def __enter__(self):
if self.enable == VERBOSE_OFF:
return
st = torch._C._verbose.mkl_set_verbose(self.enable)
assert st, (
"Failed to set MKL into verbose mode. Please consider to disable this verbose scope."
)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
torch._C._verbose.mkl_set_verbose(VERBOSE_OFF)
return False
|
verbose
|
python
|
gevent__gevent
|
src/greentest/3.14/test_urllib2.py
|
{
"start": 80901,
"end": 84920
}
|
class ____(unittest.TestCase):
class PutRequest(Request):
method = 'PUT'
def setUp(self):
self.get = Request("http://www.python.org/~jeremy/")
self.post = Request("http://www.python.org/~jeremy/",
"data",
headers={"X-Test": "test"})
self.head = Request("http://www.python.org/~jeremy/", method='HEAD')
self.put = self.PutRequest("http://www.python.org/~jeremy/")
self.force_post = self.PutRequest("http://www.python.org/~jeremy/",
method="POST")
def test_method(self):
self.assertEqual("POST", self.post.get_method())
self.assertEqual("GET", self.get.get_method())
self.assertEqual("HEAD", self.head.get_method())
self.assertEqual("PUT", self.put.get_method())
self.assertEqual("POST", self.force_post.get_method())
def test_data(self):
self.assertFalse(self.get.data)
self.assertEqual("GET", self.get.get_method())
self.get.data = "spam"
self.assertTrue(self.get.data)
self.assertEqual("POST", self.get.get_method())
# issue 16464
# if we change data we need to remove content-length header
# (cause it's most probably calculated for previous value)
def test_setting_data_should_remove_content_length(self):
self.assertNotIn("Content-length", self.get.unredirected_hdrs)
self.get.add_unredirected_header("Content-length", 42)
self.assertEqual(42, self.get.unredirected_hdrs["Content-length"])
self.get.data = "spam"
self.assertNotIn("Content-length", self.get.unredirected_hdrs)
# issue 17485 same for deleting data.
def test_deleting_data_should_remove_content_length(self):
self.assertNotIn("Content-length", self.get.unredirected_hdrs)
self.get.data = 'foo'
self.get.add_unredirected_header("Content-length", 3)
self.assertEqual(3, self.get.unredirected_hdrs["Content-length"])
del self.get.data
self.assertNotIn("Content-length", self.get.unredirected_hdrs)
def test_get_full_url(self):
self.assertEqual("http://www.python.org/~jeremy/",
self.get.get_full_url())
def test_selector(self):
self.assertEqual("/~jeremy/", self.get.selector)
req = Request("http://www.python.org/")
self.assertEqual("/", req.selector)
def test_get_type(self):
self.assertEqual("http", self.get.type)
def test_get_host(self):
self.assertEqual("www.python.org", self.get.host)
def test_get_host_unquote(self):
req = Request("http://www.%70ython.org/")
self.assertEqual("www.python.org", req.host)
def test_proxy(self):
self.assertFalse(self.get.has_proxy())
self.get.set_proxy("www.perl.org", "http")
self.assertTrue(self.get.has_proxy())
self.assertEqual("www.python.org", self.get.origin_req_host)
self.assertEqual("www.perl.org", self.get.host)
def test_wrapped_url(self):
req = Request("<URL:http://www.python.org>")
self.assertEqual("www.python.org", req.host)
def test_url_fragment(self):
req = Request("http://www.python.org/?qs=query#fragment=true")
self.assertEqual("/?qs=query", req.selector)
req = Request("http://www.python.org/#fun=true")
self.assertEqual("/", req.selector)
# Issue 11703: geturl() omits fragment in the original URL.
url = 'http://docs.python.org/library/urllib2.html#OK'
req = Request(url)
self.assertEqual(req.get_full_url(), url)
def test_url_fullurl_get_full_url(self):
urls = ['http://docs.python.org',
'http://docs.python.org/library/urllib2.html#OK',
'http://www.python.org/?qs=query#fragment=true']
for url in urls:
req = Request(url)
self.assertEqual(req.get_full_url(), req.full_url)
if __name__ == "__main__":
unittest.main()
|
RequestTests
|
python
|
pytorch__pytorch
|
test/dynamo/test_guard_serialization.py
|
{
"start": 14199,
"end": 60265
}
|
class ____(TestGuardSerializationBase):
def test_function_locals(self):
def foo(x):
return x + 1
def fn(x, g):
return g(x) + 1
self._test_serialization("TENSOR_MATCH", fn, torch.randn(3), foo)
    def test_tensor_match(self):
        """TENSOR_MATCH passes for same shape/dtype and fails otherwise."""
        def f(x: torch.Tensor):
            return x + 1
        ref, loaded = self._test_serialization(
            "TENSOR_MATCH", f, torch.ones(2, dtype=torch.float32)
        )
        # Same shape and dtype as the compiled input: guard holds.
        self._test_check_fn(
            ref, loaded, {"x": torch.randn(2, dtype=torch.float32)}, True
        )
        # Different shape: guard fails.
        self._test_check_fn(
            ref, loaded, {"x": torch.randn(3, dtype=torch.float32)}, False
        )
        # Different dtype: guard fails.
        self._test_check_fn(
            ref, loaded, {"x": torch.randn(2, dtype=torch.float64)}, False
        )
        # Non-tensor input: guard fails.
        self._test_check_fn(ref, loaded, {"x": None}, False)
    def test_not_present_in_generic_dict(self):
        """Patching an instance attribute invalidates NOT_PRESENT_IN_GENERIC_DICT."""
        class Module(torch.nn.Module):
            def forward(self, x: torch.Tensor):
                return x + 1
        m = Module()
        def fn(x):
            return m(x)
        ref, loaded = self._test_serialization(
            "NOT_PRESENT_IN_GENERIC_DICT", fn, torch.ones(2, dtype=torch.float32)
        )
        self._test_check_fn(ref, loaded, {"m": m}, True)
        # Monkey-patching forward onto the instance adds it to m.__dict__,
        # so the "not present" guard must now fail.
        m.forward = types.MethodType(lambda x: x + 2, m)
        self._test_check_fn(ref, loaded, {"m": m}, False)
    def test_hasattr_serialization(self):
        """HASATTR guard holds while the attribute exists, fails after delattr."""
        class Module(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.a = 1
            def forward(self, x: torch.Tensor):
                # The hasattr branch is what installs the HASATTR guard.
                if hasattr(self, "a"):
                    return x + self.a
                else:
                    return x + 2
        m = Module()
        def fn(x):
            return m(x)
        ref, loaded = self._test_serialization("HASATTR", fn, torch.randn(3))
        self._test_check_fn(ref, loaded, {"m": m}, True)
        delattr(m, "a")
        self._test_check_fn(ref, loaded, {"m": m}, False)
    def test_type_match(self):
        """TYPE_MATCH serializes only for globally defined classes and matches by type."""
        class LocalModule(torch.nn.Module):
            def forward(self, x: torch.Tensor):
                return x + 1
        m = LocalModule()
        def fn(m, x):
            return m(x)
        # Locally scoped classes cannot be pickled for guard serialization.
        with self.assertRaisesRegex(
            TypeError, "Please define the class at global scope"
        ):
            self._test_serialization("TYPE_MATCH", fn, m, torch.randn(3))
        m = GlobalModule()
        ref, loaded = self._test_serialization("TYPE_MATCH", fn, m, torch.randn(3))
        self._test_check_fn(ref, loaded, {"m": m}, True)
        # Another instance of the same type still passes; a different type fails.
        self._test_check_fn(ref, loaded, {"m": GlobalModule()}, True)
        self._test_check_fn(ref, loaded, {"m": torch.nn.Module()}, False)
    def test_tensor_subclass_metadata_match(self):
        """TENSOR_SUBCLASS_METADATA_MATCH: local subclasses error; metadata is
        compared (optionally via a custom guard hook) for global subclasses."""
        class LocalSubclass(torch.Tensor):
            @staticmethod
            def __new__(cls, a, outer_size=None, outer_stride=None):
                if outer_size is None:
                    outer_size = a.size()
                if outer_stride is None:
                    outer_stride = a.stride()
                shape = outer_size
                kwargs = {}
                kwargs["strides"] = outer_stride
                kwargs["storage_offset"] = a.storage_offset()
                kwargs["device"] = a.device
                kwargs["layout"] = a.layout
                kwargs["requires_grad"] = a.requires_grad
                kwargs["dtype"] = a.dtype
                return torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs)
            def __init__(self, a, outer_size=None, outer_stride=None):
                self.a = a
            @classmethod
            def __torch_dispatch__(cls, func, types, args, kwargs):
                if kwargs is None:
                    kwargs = {}
                # Unwrap, dispatch on the inner tensor, then rewrap.
                args_a = pytree.tree_map_only(LocalSubclass, lambda x: x.a, args)
                kwargs_a = pytree.tree_map_only(LocalSubclass, lambda x: x.a, kwargs)
                out_a = func(*args_a, **kwargs_a)
                if isinstance(out_a, torch.Tensor):
                    return LocalSubclass(out_a)
                return out_a
            def __tensor_flatten__(self):
                return ["a"], None
            @staticmethod
            def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
                assert meta is None
                a = inner_tensors["a"]
                if type(a) is torch.Tensor:
                    assert outer_size is not None
                    assert outer_stride is not None
                return LocalSubclass(a, outer_size, outer_stride)
        def fn(x):
            return x * 2
        # === example subclass defined locally (error) ===
        local_sub = LocalSubclass(torch.randn(3))
        with self.assertRaisesRegex(
            PackageError, "Please define the class at global scope"
        ):
            self._test_serialization("TENSOR_SUBCLASS_METADATA_MATCH", fn, local_sub)
        # === example subclass with None extra metadata ===
        from torch.testing._internal.two_tensor import TwoTensor
        tt = TwoTensor(torch.randn(3), torch.randn(3))
        ref, loaded = self._test_serialization("TENSOR_SUBCLASS_METADATA_MATCH", fn, tt)
        self._test_check_fn(ref, loaded, {"x": tt}, True)
        self._test_check_fn(ref, loaded, {"x": torch.ones_like(tt)}, True)
        # used below for convenience; returned func accepts some metadata and whether the
        # guard is expected to pass for the given subclass type
        def _get_meta_test_check_fn(ref, loaded, subclass_type):
            def _f(meta, expected, ref=ref, loaded=loaded, subclass_type=subclass_type):
                self._test_check_fn(
                    ref,
                    loaded,
                    {"x": subclass_type(torch.randn(3), extra=meta)},
                    expected,
                )
            return _f
        # === example subclass with extra metadata ===
        extra_meta = {
            "foo": 5,
            "bar": "hello",
        }
        sub = SubclassWithMeta(torch.randn(3), extra=extra_meta)
        ref, loaded = self._test_serialization(
            "TENSOR_SUBCLASS_METADATA_MATCH", fn, sub
        )
        self._test_check_fn(ref, loaded, {"x": sub}, True)
        check_with_meta = _get_meta_test_check_fn(ref, loaded, SubclassWithMeta)
        check_with_meta(dict(extra_meta), True)
        # different "foo"
        check_with_meta({"foo": 6, "bar": "hello"}, False)
        # different "bar"
        check_with_meta({"foo": 5, "bar": "world"}, False)
        # === example subclass with custom metadata guard logic ===
        sub = SubclassWithCustomMetadataGuard(torch.randn(3), extra=extra_meta)
        ref, loaded = self._test_serialization(
            "TENSOR_SUBCLASS_METADATA_MATCH", fn, sub
        )
        self._test_check_fn(ref, loaded, {"x": sub}, True)
        check_with_meta = _get_meta_test_check_fn(
            ref, loaded, SubclassWithCustomMetadataGuard
        )
        check_with_meta(dict(extra_meta), True)
        # different "foo"; custom logic says this is okay
        check_with_meta({"foo": 6, "bar": "hello"}, True)
        # different "bar"
        check_with_meta({"foo": 5, "bar": "world"}, False)
        # === example subclass with subclass inner tensor ===
        sub = SubclassWithSubclassInnerTensor(torch.randn(3), extra=extra_meta)
        ref, loaded = self._test_serialization(
            "TENSOR_SUBCLASS_METADATA_MATCH", fn, sub
        )
        self._test_check_fn(ref, loaded, {"x": sub}, True)
        check_with_meta = _get_meta_test_check_fn(
            ref, loaded, SubclassWithSubclassInnerTensor
        )
        check_with_meta(dict(extra_meta), True)
        # different "foo"
        check_with_meta({"foo": 6, "bar": "hello"}, False)
        # different "bar"
        check_with_meta({"foo": 5, "bar": "world"}, False)
    def test_equals_match(self):
        """EQUALS_MATCH compares via the value's own __eq__ after deserialization."""
        def fn(x, y):
            # CustomConstantType is registered as a pytree constant so this should
            # result in an EQUALS_MATCH guard.
            if x in y:
                return torch.zeros(3)
            return torch.ones(3)
        x = CustomConstantType(4, 5)
        y = [CustomConstantType(2, 3), CustomConstantType(4, 5)]
        ref, loaded = self._test_serialization("EQUALS_MATCH", fn, x, y)
        self._test_check_fn(ref, loaded, {"x": x, "y": y}, True)
        # custom __eq__ says that CustomConstantType(4, 5) == CustomConstantType(4, 9)
        self._test_check_fn(
            ref,
            loaded,
            {
                "x": CustomConstantType(4, 5),
                "y": [CustomConstantType(2, 3), CustomConstantType(4, 9)],
            },
            True,
        )
        # Empty list and a list without an equal element both fail the guard.
        self._test_check_fn(ref, loaded, {"x": x, "y": []}, False)
        self._test_check_fn(
            ref,
            loaded,
            {
                "x": x,
                "y": [CustomConstantType(2, 3), CustomConstantType(6, 7)],
            },
            False,
        )
    def test_constant_match(self):
        """CONSTANT_MATCH guards bool, None, and int constants by exact value."""
        # === bool constant ===
        def fn(x, y):
            if y:
                return x + 1
            return x + 2
        x = torch.randn(3)
        y = True
        ref, loaded = self._test_serialization("CONSTANT_MATCH", fn, x, y)
        self._test_check_fn(ref, loaded, {"x": x, "y": y}, True)
        self._test_check_fn(ref, loaded, {"x": torch.randn(3), "y": True}, True)
        self._test_check_fn(ref, loaded, {"x": torch.randn(4), "y": True}, True)
        # guard should fail for different y value
        self._test_check_fn(ref, loaded, {"x": torch.randn(3), "y": False}, False)
        # === None constant ===
        def fn(x, y):
            if y is None:
                return x + 1
            return x + 2
        x = torch.randn(3)
        y = None
        ref, loaded = self._test_serialization("CONSTANT_MATCH", fn, x, y)
        self._test_check_fn(ref, loaded, {"x": x, "y": y}, True)
        self._test_check_fn(ref, loaded, {"x": torch.randn(3), "y": None}, True)
        self._test_check_fn(ref, loaded, {"x": torch.randn(4), "y": None}, True)
        # guard should fail for non-None y value
        self._test_check_fn(ref, loaded, {"x": torch.randn(3), "y": 5}, False)
        self._test_check_fn(ref, loaded, {"x": torch.randn(3), "y": True}, False)
        # === int constant ===
        def fn(x, y):
            return x + y
        x = torch.randn(3)
        y = 5
        ref, loaded = self._test_serialization("CONSTANT_MATCH", fn, x, y)
        self._test_check_fn(ref, loaded, {"x": x, "y": y}, True)
        self._test_check_fn(ref, loaded, {"x": torch.randn(3), "y": 5}, True)
        self._test_check_fn(ref, loaded, {"x": torch.randn(4), "y": 5}, True)
        # guard should fail for different y value
        self._test_check_fn(ref, loaded, {"x": torch.randn(3), "y": 6}, False)
    def test_nn_module(self):
        """NN_MODULE guards (which imply ID_MATCH) refuse to serialize."""
        def fn(m, x):
            return m(x)
        m = GlobalModule()
        x = torch.randn(3)
        # config setting controls whether the NN_MODULE guard is installed
        with patch("torch._dynamo.config.inline_inbuilt_nn_modules", False):
            # we don't support NN_MODULE because it adds an ID_MATCH guard, and we don't
            # support that in serialization
            with self.assertRaisesRegex(
                PackageError, "NN_MODULE guard cannot be serialized."
            ):
                self._test_serialization("NN_MODULE", fn, m, x)
    def test_class_match(self):
        """CLASS_MATCH guards refuse to serialize (outside caching_precompile)."""
        def fn(x):
            # usage of this context manager installs a CLASS_MATCH guard
            with torch.no_grad():
                y = x * 2
            return y
        x = torch.randn(3)
        # we don't support CLASS_MATCH here because it relies on identity-based
        # matching, which is not supported in serialization
        with self.assertRaisesRegex(
            PackageError, "CLASS_MATCH guard cannot be serialized."
        ):
            self._test_serialization("CLASS_MATCH", fn, x)
    def test_closure_match(self):
        """CLOSURE_MATCH guards refuse to serialize."""
        def fn(x):
            # usage of this global function installs a CLOSURE_MATCH guard
            return global_func(x)
        x = torch.randn(3)
        # CLOSURE_MATCH is rejected at serialization time (the error below);
        # presumably because it implies identity-based matching — see guard impl.
        with self.assertRaisesRegex(
            PackageError, "CLOSURE_MATCH guard cannot be serialized."
        ):
            self._test_serialization("CLOSURE_MATCH", fn, x)
    def test_sequence_length(self):
        """SEQUENCE_LENGTH checks only the length, not the element types."""
        # tuple input installs a SEQUENCE_LENGTH guard
        def fn(t, x):
            return t[1] + x
        t = tuple(torch.randn(3) for _ in range(3))
        x = torch.randn(3)
        ref, loaded = self._test_serialization("SEQUENCE_LENGTH", fn, t, x)
        self._test_check_fn(ref, loaded, {"x": x, "t": t}, True)
        self._test_check_fn(
            ref,
            loaded,
            {
                "x": torch.randn(3),
                "t": tuple(torch.randn(3) for _ in range(3)),
            },
            True,
        )
        # different types in tuple of same length shouldn't fail SEQUENCE_LENGTH guard
        # (it should fail the separate TYPE_MATCH guard but that isn't tested here)
        self._test_check_fn(ref, loaded, {"x": torch.randn(3), "t": (0, 1, 2)}, True)
        # different length tuple
        self._test_check_fn(
            ref,
            loaded,
            {
                "x": torch.randn(3),
                "t": tuple(torch.randn(3) for _ in range(4)),
            },
            False,
        )
    def test_tuple_iterator_len(self):
        """TUPLE_ITERATOR_LEN guards on iterator length, not element values."""
        def fn(t, x):
            if len(list(t)) > 2:
                return x * 2
            return x + 1
        tup = (1, 2, 3)
        x = torch.randn(3)
        # func to generate kwargs; useful for avoiding iterator exhaustion issues
        def _gen_kwargs(tup=tup, x=x):
            return {"t": iter(tup), "x": x}
        ref, loaded = self._test_serialization(
            "TUPLE_ITERATOR_LEN", fn, _gen_fn=_gen_kwargs
        )
        # same tuple
        self._test_check_fn(ref, loaded, {"t": iter(tup), "x": x}, True)
        self._test_check_fn(ref, loaded, {"t": iter(tup), "x": torch.randn(4)}, True)
        # same length tuple, different contents
        self._test_check_fn(ref, loaded, {"t": iter((3, 2, 1)), "x": x}, True)
        self._test_check_fn(
            ref, loaded, {"t": iter((3, 2, 1)), "x": torch.randn(4)}, True
        )
        # different tuple lengths
        self._test_check_fn(ref, loaded, {"t": iter((1, 2)), "x": x}, False)
        self._test_check_fn(
            ref, loaded, {"t": iter((1, 2)), "x": torch.randn(4)}, False
        )
        self._test_check_fn(ref, loaded, {"t": iter((1, 2, 3, 4)), "x": x}, False)
        self._test_check_fn(
            ref, loaded, {"t": iter((1, 2, 3, 4)), "x": torch.randn(4)}, False
        )
    def test_range_iterator_match(self):
        """RANGE_ITERATOR_MATCH guards on the values a range yields, not its args."""
        def fn(x, r):
            y = x
            for val in r:
                y = x + val
            return y
        x = torch.randn(3)
        def _gen_kwargs(x=x):
            return {"x": x, "r": iter(range(2, 15, 3))}
        ref, loaded = self._test_serialization(
            "RANGE_ITERATOR_MATCH", fn, _gen_fn=_gen_kwargs
        )
        # same range
        self._test_check_fn(ref, loaded, {"x": x, "r": iter(range(2, 15, 3))}, True)
        self._test_check_fn(
            ref, loaded, {"x": torch.randn(4), "r": iter(range(2, 15, 3))}, True
        )
        # equivalent even with different end
        self._test_check_fn(ref, loaded, {"x": x, "r": iter(range(2, 16, 3))}, True)
        self._test_check_fn(
            ref, loaded, {"x": torch.randn(4), "r": iter(range(2, 16, 3))}, True
        )
        # different start
        self._test_check_fn(ref, loaded, {"x": x, "r": iter(range(1, 15, 3))}, False)
        self._test_check_fn(
            ref, loaded, {"x": torch.randn(4), "r": iter(range(1, 15, 3))}, False
        )
        # different end resulting in different values
        self._test_check_fn(ref, loaded, {"x": x, "r": iter(range(2, 18, 3))}, False)
        self._test_check_fn(
            ref, loaded, {"x": torch.randn(4), "r": iter(range(2, 18, 3))}, False
        )
        # different step
        self._test_check_fn(ref, loaded, {"x": x, "r": iter(range(2, 15, 4))}, False)
        self._test_check_fn(
            ref, loaded, {"x": torch.randn(4), "r": iter(range(2, 15, 4))}, False
        )
    def test_dict_version(self):
        """DICT_VERSION guards (per-process dict version tags) refuse to serialize."""
        def fn(x):
            return pytree.tree_leaves(x)[0] + 1
        with self.assertRaisesRegex(
            PackageError, "DICT_VERSION guard cannot be serialized."
        ):
            self._test_serialization("DICT_VERSION", fn, {"t": torch.randn(3)})
    def test_dict_contains(self):
        """DICT_CONTAINS passes whenever the key exists, regardless of extras."""
        def fn(x):
            if x.__contains__("t"):
                return x["t"] + 1
            else:
                return torch.ones(3)
        ref, loaded = self._test_serialization(
            "DICT_CONTAINS", fn, {"t": torch.randn(3)}
        )
        self._test_check_fn(ref, loaded, {"x": {"t": torch.randn(3)}}, True)
        self._test_check_fn(ref, loaded, {"x": {}}, False)
        # Extra keys are fine; only the presence of "t" is guarded.
        self._test_check_fn(
            ref, loaded, {"x": {"t": torch.randn(3), "d": torch.randn(3)}}, True
        )
    def test_bool_match(self):
        """BOOL_MATCH passes only for the exact boolean seen at compile time."""
        def fn(x, b):
            if b:
                return x + 1
            else:
                return x + 2
        ref, loaded = self._test_serialization("BOOL_MATCH", fn, torch.randn(3), True)
        self._test_check_fn(ref, loaded, {"x": torch.randn(3), "b": True}, True)
        self._test_check_fn(ref, loaded, {"x": torch.randn(3), "b": False}, False)
        # Truthiness is not enough; a non-bool fails too.
        self._test_check_fn(ref, loaded, {"x": torch.randn(3), "b": None}, False)
    def test_none_match(self):
        """NONE_MATCH passes only when the guarded value is exactly None."""
        def fn(x, b):
            if b is None:
                return x + 1
            else:
                return x + 2
        ref, loaded = self._test_serialization("NONE_MATCH", fn, torch.randn(3), None)
        self._test_check_fn(ref, loaded, {"x": torch.randn(3), "b": None}, True)
        self._test_check_fn(ref, loaded, {"x": torch.randn(3), "b": False}, False)
        self._test_check_fn(ref, loaded, {"x": torch.randn(3), "b": True}, False)
    def test_id_match(self):
        """ID_MATCH guards refuse to serialize by default (ids are per-process)."""
        def fn(x):
            return x + id(x)
        with self.assertRaisesRegex(
            PackageError, "ID_MATCH guard cannot be serialized."
        ):
            self._test_serialization("ID_MATCH", fn, torch.randn(3))
    @torch._dynamo.config.patch(caching_precompile=True)
    def test_id_match_with_config(self):
        """With caching_precompile=True, ID_MATCH/CLASS_MATCH do serialize."""
        def fn(x):
            return x + id(x)
        ref, loaded = self._test_serialization("ID_MATCH", fn, torch.randn(3))
        self._test_check_fn(ref, loaded, {"x": torch.randn(3)}, True)
        def fn(x):
            # usage of this context manager installs a CLASS_MATCH guard
            with torch.no_grad():
                y = x * 2
            return y
        ref, loaded = self._test_serialization("CLASS_MATCH", fn, torch.randn(3))
        self._test_check_fn(ref, loaded, {"x": torch.randn(3)}, True)
    def test_dispatch_key_set_match(self):
        """DISPATCH_KEY_SET_MATCH fails when a tensor's dispatch keys change (cpu vs meta)."""
        def fn(x, dks):
            if dks.has("CPU"):
                return torch.sin(x + 1)
            else:
                return torch.sin(x - 1)
        x = torch.randn(3)
        dks = torch._C._dispatch_keys(x)
        ref, loaded = self._test_serialization("DISPATCH_KEY_SET_MATCH", fn, x, dks)
        self._test_check_fn(ref, loaded, {"x": x, "dks": dks}, True)
        # A meta tensor carries a different dispatch key set.
        x = torch.randn(3, device="meta")
        dks = torch._C._dispatch_keys(x)
        self._test_check_fn(ref, loaded, {"x": x, "dks": dks}, False)
    def test_dual_level(self):
        """DUAL_LEVEL guard fails when entered forward-AD dual level differs."""
        def fn(x):
            with torch.autograd.forward_ad.dual_level():
                return x + 1
        x = torch.randn(3)
        ref, loaded = self._test_serialization("DUAL_LEVEL", fn, x)
        self._test_check_fn(ref, loaded, {"x": x}, True)
        # Compiled outside any dual level; re-checking inside one must fail.
        with torch.autograd.forward_ad.dual_level():
            self._test_check_fn(ref, loaded, {"x": x}, False)
    def test_functorch_stack_match(self):
        """FUNCTORCH_STACK_MATCH must reproduce the exact functorch interpreter
        stack (vmap/grad/jvp/functionalize nesting) active at compile time."""
        # Test when functorch stack is empty.
        def fn(x):
            return torch.func.jvp(torch.sin, (x,), (x,))
        x = torch.randn(3, 4)
        ref, loaded = self._test_serialization("FUNCTORCH_STACK_MATCH", fn, x)
        self._test_check_fn(ref, loaded, {"x": x}, True)
        with torch._functorch.vmap.vmap_increment_nesting(2, "error"):
            self._test_check_fn(ref, loaded, {"x": x}, False)
        def fn(x):
            def g(x):
                return torch.vmap(torch.func.grad(torch.sin))(x)
            return torch.vmap(g)(x)
        x = torch.randn(4, 5)
        ref, loaded = self._test_serialization("FUNCTORCH_STACK_MATCH", fn, x)
        self._test_check_fn(ref, loaded, {"x": x}, True)
        with torch._functorch.eager_transforms.grad_increment_nesting():
            self._test_check_fn(ref, loaded, {"x": x}, False)
        # Test when there are more than 0 functorch layers.
        # Simulate the case where torch.compile is nested inside eager transforms.
        # Case 1: vmap
        def fn(x):
            return x.sum()
        ref = loaded = None
        def run(x):
            nonlocal ref, loaded
            # Turn off automatic dynamic shape to so that functionalization
            # doesn't produce extra SymInt to serialize.
            with torch._dynamo.config.patch(automatic_dynamic_shapes=False):
                ref, loaded = self._test_serialization("FUNCTORCH_STACK_MATCH", fn, x)
            return fn(x)
        torch.vmap(run)(x)
        # Compiled under one vmap layer: only that exact stack should pass.
        self._test_check_fn(ref, loaded, {"x": x}, False)
        with torch._functorch.vmap.vmap_increment_nesting(1, "error"):
            self._test_check_fn(ref, loaded, {"x": x}, True)
            with torch._functorch.vmap.vmap_increment_nesting(1, "error"):
                self._test_check_fn(ref, loaded, {"x": x}, False)
        with torch._functorch.eager_transforms.grad_increment_nesting():
            self._test_check_fn(ref, loaded, {"x": x}, False)
        # Case 2: grad
        x = torch.randn(3, 2)
        ref = loaded = None
        torch.func.grad(run)(x)
        self._test_check_fn(ref, loaded, {"x": x}, False)
        with torch._functorch.eager_transforms.grad_increment_nesting():
            self._test_check_fn(ref, loaded, {"x": x}, True)
            with torch._functorch.eager_transforms.grad_increment_nesting():
                self._test_check_fn(ref, loaded, {"x": x}, False)
            with torch._functorch.vmap.vmap_increment_nesting(1, "error"):
                self._test_check_fn(ref, loaded, {"x": x}, False)
        # Case 3: jvp + vmap
        x = torch.randn(3, 4)
        ref = loaded = None
        def fn(x):
            return torch.func.jvp(torch.sin, (x,), (x,))
        torch.func.jvp(torch.vmap(run), (x,), (x,))
        self._test_check_fn(ref, loaded, {"x": x}, False)
        # Order of nesting matters: jvp-then-vmap passes, vmap-then-jvp fails.
        with torch._functorch.eager_transforms.jvp_increment_nesting():
            with torch._functorch.vmap.vmap_increment_nesting(1, "error"):
                self._test_check_fn(ref, loaded, {"x": x}, True)
        with torch._functorch.vmap.vmap_increment_nesting(1, "error"):
            with torch._functorch.eager_transforms.jvp_increment_nesting():
                self._test_check_fn(ref, loaded, {"x": x}, False)
        # Case 4: functionalize
        x = torch.randn(3, 2)
        ref = loaded = None
        torch.func.functionalize(run)(x)
        self._test_check_fn(ref, loaded, {"x": x}, False)
        torch._C._functorch._func_increment_nesting(True)
        try:
            self._test_check_fn(ref, loaded, {"x": x}, True)
        finally:
            torch._C._functorch._func_decrement_nesting()
        with torch._functorch.eager_transforms.jvp_increment_nesting():
            self._test_check_fn(ref, loaded, {"x": x}, False)
        # Case 5: vmap + grad
        def fn(x):
            return x.sum()
        x = torch.randn(3, 2)
        ref = loaded = None
        torch.vmap(torch.func.grad(run))(x)
        self._test_check_fn(ref, loaded, {"x": x}, False)
        with torch._functorch.vmap.vmap_increment_nesting(1, "error"):
            with torch._functorch.eager_transforms.grad_increment_nesting():
                self._test_check_fn(ref, loaded, {"x": x}, True)
        with torch._functorch.eager_transforms.grad_increment_nesting():
            with torch._functorch.vmap.vmap_increment_nesting(1, "error"):
                self._test_check_fn(ref, loaded, {"x": x}, False)
        with torch._functorch.vmap.vmap_increment_nesting(1, "error"):
            self._test_check_fn(ref, loaded, {"x": x}, False)
        with torch._functorch.eager_transforms.grad_increment_nesting():
            self._test_check_fn(ref, loaded, {"x": x}, False)
    def test_duplicate_input(self):
        """DUPLICATE_INPUT requires aliased args to stay the same object."""
        def fn(x, x_):
            return x + x_
        x = torch.randn(3, 2)
        ref, loaded = self._test_serialization("DUPLICATE_INPUT", fn, x, x)
        self._test_check_fn(ref, loaded, {"x": x, "x_": x}, True)
        # Equal-shaped but distinct tensors break the aliasing assumption.
        self._test_check_fn(ref, loaded, {"x": x, "x_": torch.randn(3, 2)}, False)
    def test_weakref_alive(self):
        """WEAKREF_ALIVE guards (per-process liveness) refuse to serialize."""
        mod = torch.nn.Linear(10, 10, bias=False)
        for p in mod.parameters():
            p.grad = torch.rand_like(p)
        opt = torch.optim.SGD(mod.parameters(), lr=0.1)
        def fn():
            params = []
            opt._init_group(opt.param_groups[0], params, [], [])
            return params[0].sum()
        with self.assertRaisesRegex(
            PackageError, "WEAKREF_ALIVE guard cannot be serialized"
        ):
            with torch.set_grad_enabled(False):
                self._test_serialization("WEAKREF_ALIVE", fn)
    def test_mapping_keys_check(self):
        """MAPPING_KEYS_CHECK guards the exact keys AND their insertion order."""
        def fn(mp):
            return mp["a"] + 1
        mp = types.MappingProxyType({"a": torch.randn(3, 2), "b": torch.randn(3, 2)})
        ref, loaded = self._test_serialization("MAPPING_KEYS_CHECK", fn, mp)
        self._test_check_fn(ref, loaded, {"mp": mp}, True)
        # Same keys in a different order fails.
        self._test_check_fn(
            ref,
            loaded,
            {
                "mp": types.MappingProxyType(
                    {"b": torch.randn(3, 2), "a": torch.randn(3, 2)}
                )
            },
            False,
        )
        # Missing key fails.
        self._test_check_fn(
            ref, loaded, {"mp": types.MappingProxyType({"a": torch.randn(3, 2)})}, False
        )
    def test_dict_keys_match(self):
        """DICT_KEYS_MATCH guards the exact keys AND their insertion order."""
        def fn(x):
            ret = 1
            for k in x:
                ret += x[k]
            return ret
        x = {"a": torch.randn(3, 2), "b": torch.randn(3, 2)}
        ref, loaded = self._test_serialization("DICT_KEYS_MATCH", fn, x)
        self._test_check_fn(ref, loaded, {"x": x}, True)
        # Same keys in a different order fails (iteration order is traced).
        self._test_check_fn(
            ref,
            loaded,
            {"x": {"b": torch.randn(3, 2), "a": torch.randn(3, 2)}},
            False,
        )
        self._test_check_fn(ref, loaded, {"x": {"a": torch.randn(3, 2)}}, False)
    @torch._dynamo.config.patch("skip_nnmodule_hook_guards", False)
    def test_empty_nn_module_hooks_dict(self):
        """EMPTY_NN_MODULE_HOOKS_DICT fails if any hook is later registered."""
        class Module(torch.nn.Module):
            def forward(self, x: torch.Tensor):
                return x + 1
        m = Module()
        def fn(x):
            return m(x)
        x = torch.ones(2, dtype=torch.float32)
        ref, loaded = self._test_serialization("EMPTY_NN_MODULE_HOOKS_DICT", fn, x)
        self._test_check_fn(ref, loaded, {"m": m, "x": x}, True)
        # Each hook kind (forward, forward-pre, backward) invalidates the guard
        # while registered, and the guard recovers after removal.
        h = m.register_forward_hook(lambda *args, **kwargs: None)
        self._test_check_fn(ref, loaded, {"m": m, "x": x}, False)
        h.remove()
        h = m.register_forward_pre_hook(lambda *args, **kwargs: None)
        self._test_check_fn(ref, loaded, {"m": m, "x": x}, False)
        h.remove()
        h = m.register_backward_hook(lambda *args, **kwargs: None)
        self._test_check_fn(ref, loaded, {"m": m, "x": x}, False)
        h.remove()
    def test_grad_mode(self):
        """GLOBAL_STATE guard tracks the grad-enabled flag at compile time."""
        def fn(x):
            return x + 1
        x = torch.randn(3, 2)
        with torch.enable_grad():
            ref, loaded = self._test_serialization("GLOBAL_STATE", fn, x)
        with torch.no_grad():
            self._test_check_fn(ref, loaded, {"x": x}, False)
        with torch.enable_grad():
            self._test_check_fn(ref, loaded, {"x": x}, True)
    def test_grad_mode_loading(self):
        """Deserializing guards must capture the compile-time grad mode, not the
        grad mode active at load time."""
        def fn(x):
            return x + 1
        x = torch.randn(3, 2)
        with torch.enable_grad():
            ref, _ = self._test_serialization("GLOBAL_STATE", fn, x)
        with torch.no_grad():
            # Ensure guards state loading is not affected by the current global grad mode.
            guards_state = pickle.loads(self._cached_guards_state)
            check_fn_manager = CheckFunctionManager(
                self._cached_f_code,
                guards_state.output_graph,
                shape_code_parts=guards_state.shape_code_parts,
            )
            loaded = check_fn_manager.guard_manager
            # Still under no_grad here, so the grad-enabled guard must fail.
            self._test_check_fn(ref, loaded, {"x": x}, False)
    def test_deterministic_algorithms(self):
        """GLOBAL_STATE guard tracks the deterministic-algorithms flag."""
        def fn(x):
            return x + 1
        # Save and restore the global flag so this test is side-effect free.
        deterministic_restore = torch.are_deterministic_algorithms_enabled()
        try:
            x = torch.randn(3, 2)
            torch.use_deterministic_algorithms(True)
            ref, loaded = self._test_serialization("GLOBAL_STATE", fn, x)
            torch.use_deterministic_algorithms(False)
            self._test_check_fn(ref, loaded, {"x": x}, False)
            torch.use_deterministic_algorithms(True)
            self._test_check_fn(ref, loaded, {"x": x}, True)
        finally:
            torch.use_deterministic_algorithms(deterministic_restore)
    def test_torch_function_state(self):
        """TORCH_FUNCTION_STATE guard tracks active TorchFunctionMode stack;
        locally defined modes cannot be serialized."""
        def fn(x):
            return x + 1
        x = torch.randn(3, 2)
        class LocalTorchFunctionMode(TorchFunctionMode):
            def __torch_function__(self, func, types, args=(), kwargs=None):
                if kwargs is None:
                    kwargs = {}
                return func(*args, **kwargs)
        with GlobalTorchFunctionMode():
            ref, loaded = self._test_serialization("TORCH_FUNCTION_STATE", fn, x)
            self._test_check_fn(ref, loaded, {"x": x}, True)
        # Outside the mode the guard must fail.
        self._test_check_fn(ref, loaded, {"x": x}, False)
        with GlobalTorchFunctionMode():
            ref, loaded = self._test_serialization("GLOBAL_STATE", fn, x)
            self._test_check_fn(ref, loaded, {"x": x}, True)
        with GlobalTorchFunctionMode():
            # Disabling torch_function while the mode is active also fails.
            with torch._C.DisableTorchFunction():
                self._test_check_fn(ref, loaded, {"x": x}, False)
        with self.assertRaisesRegex(
            PackageError,
            "defined in local scope. Please define the class at global scope",
        ):
            with LocalTorchFunctionMode():
                ref, loaded = self._test_serialization("TORCH_FUNCTION_STATE", fn, x)
    @unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
    def test_fsdp_training_state(self):
        """GLOBAL_STATE guard serializes correctly when an FSDP training-state
        context manager is traced through."""
        from torch.distributed.fsdp._fully_shard._fsdp_common import TrainingState
        from torch.distributed.fsdp._fully_shard._fsdp_param_group import FSDPParamGroup
        param_group = FSDPParamGroup(
            [],  # params: List[nn.Parameter],
            (torch.nn.Linear(1, 1),),  # module: nn.Module,
            None,  # mesh_info: FSDPMeshInfo,
            None,  # post_forward_mesh_info: Optional[FSDPMeshInfo],
            torch.device("cpu"),  # device: torch.device,
            None,  # shard_placement_fn: Optional[Callable],
            None,  # mp_policy: MixedPrecisionPolicy,
            None,  # offload_policy: OffloadPolicy,
        )
        def fn(x):
            with param_group.use_training_state(TrainingState.FORWARD):
                if param_group._training_state == TrainingState.FORWARD:
                    return x + 1
                else:
                    return x - 1
        x = torch.randn(3, 2)
        with torch.enable_grad():
            ref, loaded = self._test_serialization("GLOBAL_STATE", fn, x)
        with torch.no_grad():
            self._test_check_fn(ref, loaded, {"x": x}, False)
        with torch.enable_grad():
            self._test_check_fn(ref, loaded, {"x": x}, True)
    def test_default_device(self):
        """DEFAULT_DEVICE guard tracks torch.set_default_device()."""
        # Save the current default device so this test is side-effect free.
        device = torch.get_default_device()
        def fn(x):
            return x + 1
        x = torch.randn(3, 2)
        try:
            torch.set_default_device("cpu")
            ref, loaded = self._test_serialization("DEFAULT_DEVICE", fn, x)
            torch.set_default_device("meta")
            self._test_check_fn(ref, loaded, {"x": x}, False)
            torch.set_default_device("cpu")
            self._test_check_fn(ref, loaded, {"x": x}, True)
        finally:
            torch.set_default_device(device)
    def test_shape_env(self):
        """SHAPE_ENV guards enforce the min/max range of marked dynamic dims."""
        def fn(x):
            return x + 1
        x = torch.randn(3, 2)
        ref, loaded = self._test_serialization("SHAPE_ENV", fn, x)
        self._test_check_fn(ref, loaded, {"x": x}, True)
        # Dim 0 dynamic in [3, 10]: in-range sizes pass, out-of-range fail.
        x = torch.randn(3, 2)
        torch._dynamo.mark_dynamic(x, 0, min=3, max=10)
        ref, loaded = self._test_serialization("SHAPE_ENV", fn, x)
        self._test_check_fn(ref, loaded, {"x": torch.randn(4, 2)}, True)
        self._test_check_fn(ref, loaded, {"x": torch.randn(10, 2)}, True)
        self._test_check_fn(ref, loaded, {"x": torch.randn(11, 2)}, False)
        self._test_check_fn(ref, loaded, {"x": torch.randn(2, 2)}, False)
        # Same, but for a middle dimension of a rank-3 tensor.
        x = torch.randn(3, 3, 2)
        torch._dynamo.mark_dynamic(x, 1, min=3, max=10)
        ref, loaded = self._test_serialization("SHAPE_ENV", fn, x)
        self._test_check_fn(ref, loaded, {"x": torch.randn(3, 4, 2)}, True)
        self._test_check_fn(ref, loaded, {"x": torch.randn(3, 10, 2)}, True)
        self._test_check_fn(ref, loaded, {"x": torch.randn(3, 11, 2)}, False)
        self._test_check_fn(ref, loaded, {"x": torch.randn(3, 2, 2)}, False)
    def test_builtin_match(self):
        """BUILTIN_MATCH fails if the builtin (getattr) is replaced, even by an
        equivalent wrapper."""
        def fn(x):
            # usage of getattr() here installs a BUILTIN_MATCH guard
            s = getattr(x, "shape")  # noqa: B009
            return x + s[0]
        x = torch.randn(3)
        ref, loaded = self._test_serialization("BUILTIN_MATCH", fn, x)
        self._test_check_fn(ref, loaded, {"x": x}, True)
        getattr_original = getattr
        def getattr_new(*args, **kwargs):
            return getattr_original(*args, **kwargs)
        # __builtins__ is a dict in the main module but a module elsewhere.
        builtins_dict = (
            __builtins__ if isinstance(__builtins__, dict) else __builtins__.__dict__
        )
        builtins_dict["getattr"] = getattr_new
        try:
            self._test_check_fn(ref, loaded, {"x": x}, False)
        finally:
            builtins_dict["getattr"] = getattr_original
    def test_skipped_objects(self):
        """Unpicklable attributes (code objects, functions) on a module don't
        break guard serialization."""
        def foo():
            pass
        class Module(torch.nn.Module):
            def __init__(self):
                super().__init__()
                # Deliberately attach objects pickle cannot handle.
                self.code = foo.__code__
                self.foo = foo
                self.p = torch.nn.Parameter(torch.randn(3, 2))
            def forward(self, x):
                z = x + 1
                for p in self.parameters():
                    z += p
                return z
        m = Module()
        ref, loaded = self._test_serialization("TENSOR_MATCH", m, torch.randn(3, 2))
        self._test_check_fn(ref, loaded, {"self": m, "x": torch.randn(3, 2)}, True)
    def test_bound_method_input(self):
        """A bound method passed as an input survives guard serialization."""
        class MyModule(torch.nn.Module):
            def forward(self, foo, x):
                return x + id(type(foo))
        m = MyModule()
        ref, loaded = self._test_serialization(
            "TYPE_MATCH", m, MyClass().add, torch.randn(3, 2)
        )
        self._test_check_fn(
            ref, loaded, {"self": m, "foo": MyClass().add, "x": torch.randn(3, 2)}, True
        )
    def test_bound_methods_missing(self):
        """Inputs whose instances cannot be pickled (__getstate__ raises) still
        serialize as long as the guard doesn't need their state."""
        class MyClass:
            def __getstate__(self):
                raise NotImplementedError
            def add(self, x):
                return x + 1
        def foo(x: torch.Tensor, y: list[MyClass]):
            assert len(y) == 1
            return x + 1
        ref, loaded = self._test_serialization(
            "TYPE_MATCH", foo, torch.randn(3, 2), [MyClass()]
        )
        self._test_check_fn(
            ref, loaded, {"x": torch.randn(3, 2), "y": [MyClass()]}, True
        )
    def test_bound_methods_empty(self):
        """Bound methods of non-serializable instances can still appear as inputs."""
        def foo(x, y):
            assert callable(y[0])
            return x + 1
        ref, loaded = self._test_serialization(
            "TYPE_MATCH", foo, torch.randn(3, 2), [MyClassNotSerializable().add]
        )
        self._test_check_fn(
            ref,
            loaded,
            {"x": torch.randn(3, 2), "y": [MyClassNotSerializable().add]},
            True,
        )
    def test_ddp_module(self):
        """Guards produced while compiling a DDP-wrapped module round-trip
        through CompilePackage serialization (single-rank gloo group)."""
        import torch.distributed as dist
        if not dist.is_available():
            self.skipTest("Torch distributed is not available")
        from torch.nn.parallel import DistributedDataParallel as DDP
        # File-based rendezvous avoids needing a free TCP port.
        tmpfile = tempfile.NamedTemporaryFile()
        dist.init_process_group(
            backend="gloo", rank=0, world_size=1, init_method=f"file://{tmpfile.name}"
        )
        try:
            ddp_model = DDP(GlobalNestedModule())
            def foo(ddp, x):
                return ddp(x)
            x = torch.randn(10)
            package = CompilePackage(foo)
            # Filter out the identity-based guards that cannot be serialized.
            torch._dynamo.optimize(
                package=package,
                guard_filter_fn=lambda gs: [
                    x.guard_type not in ("CLOSURE_MATCH", "ID_MATCH", "CLASS_MATCH")
                    for x in gs
                ],
            )(foo)(ddp_model, x)
            self.assertEqual(len(package._codes[foo.__code__].guarded_codes), 1)
            torch._dynamo.package.load_guards_state(
                package._codes[foo.__code__].guarded_codes[0].guards_state
            )
        finally:
            dist.destroy_process_group()
    def test_dict_keys_serialization(self):
        """A dict_keys view passed as an input survives guard serialization."""
        d = {1: 2, 3: 4}
        def foo(x, y):
            for k in y:
                x += k
            return x
        ref, loaded = self._test_serialization(
            "TYPE_MATCH", foo, torch.randn(3, 2), d.keys()
        )
        self._test_check_fn(
            ref,
            loaded,
            {"x": torch.randn(3, 2), "y": d.keys()},
            True,
        )
    def test_unserializable_sharded_tensor(self):
        """An unused ShardedTensor field on an input doesn't break serialization."""
        import torch.distributed as dist
        if not dist.is_available():
            self.skipTest("Torch distributed is not available")
        tmpfile = tempfile.NamedTemporaryFile()
        dist.init_process_group(
            backend="gloo", rank=0, world_size=1, init_method=f"file://{tmpfile.name}"
        )
        try:
            ChunkShardingSpec = dist._shard.sharding_spec.ChunkShardingSpec
            ShardedTensor = dist._shard.sharded_tensor.ShardedTensor
            tensor = torch.arange(2, dtype=torch.int64)
            local_tensor = torch.unsqueeze(torch.cat([tensor, tensor + 2]), 0)
            sharding_dim = 0
            sharding_spec = ChunkShardingSpec(
                dim=sharding_dim,
                placements=[
                    "rank:0/cpu",
                ],
            )
            st = ShardedTensor._init_from_local_tensor(
                local_tensor, sharding_spec, [1, 4]
            )
            # Only inputs.x is used by the compiled fn; st rides along untouched.
            def foo(inputs):
                return inputs.x + 1
            ref, loaded = self._test_serialization(
                "TENSOR_MATCH", foo, Inputs(torch.randn(3, 2), st)
            )
            self._test_check_fn(
                ref, loaded, {"inputs": Inputs(torch.randn(3, 2), st)}, True
            )
        finally:
            dist.destroy_process_group()
    def test_function_with_wrong_fqn(self):
        """A function whose __qualname__ doesn't resolve at its module path
        still serializes when carried (unused) on an input."""
        def foo(inputs):
            return inputs.x + 1
        x = torch.randn(3, 2)
        ref, loaded = self._test_serialization(
            "TENSOR_MATCH", foo, Inputs(x, global_func_wrong_fqn)
        )
        self._test_check_fn(
            ref, loaded, {"inputs": Inputs(x, global_func_wrong_fqn)}, True
        )
    def test_c10d_work(self):
        """An unused c10d Work handle on an input doesn't break serialization."""
        import torch.distributed as dist
        if not dist.is_available():
            self.skipTest("Torch distributed is not available")
        Work = dist.distributed_c10d.Work
        # Minimal in-process Work stub; no real collective ever runs.
        class DummyWork(Work):
            def __init__(self, should_succeed=True):
                super().__init__()
                self._done = False
                self._should_succeed = should_succeed
            def is_completed(self):
                return self._done
            def is_success(self):
                return self._should_succeed
            def wait(self, timeout=None):
                self._done = True
                if not self._should_succeed:
                    raise RuntimeError("DummyWork failed")
                return self
            def result(self):
                if not self._should_succeed:
                    raise RuntimeError("DummyWork failed")
                return "dummy_result"
        def foo(inputs):
            return inputs.x + 1
        x = torch.randn(3, 2)
        ref, loaded = self._test_serialization(
            "TENSOR_MATCH", foo, Inputs(x, DummyWork())
        )
        self._test_check_fn(ref, loaded, {"inputs": Inputs(x, DummyWork())}, True)
    def test_unused_weakref(self):
        """An unused weakref on an input doesn't break guard serialization."""
        def foo(inputs):
            return inputs.x + 1
        x = torch.randn(3, 2)
        ref, loaded = self._test_serialization(
            "TENSOR_MATCH", foo, Inputs(x, weakref.ref(x))
        )
        self._test_check_fn(ref, loaded, {"inputs": Inputs(x, weakref.ref(x))}, True)
def test_unused_stream(self):
if not torch.cuda.is_available():
self.skipTest("CUDA is not available")
def foo(inputs):
return inputs.x + 1
x = torch.randn(3, 2)
ref, loaded = self._test_serialization(
"TENSOR_MATCH", foo, Inputs(x, torch.cuda.Stream())
)
self._test_check_fn(
ref, loaded, {"inputs": Inputs(x, torch.cuda.Stream())}, True
)
def test_unused_process_group(self):
import torch.distributed as dist
if not dist.is_available():
self.skipTest("Torch distributed is not available")
def foo(inputs):
return inputs.x + 1
tmpfile = tempfile.NamedTemporaryFile()
dist.init_process_group(
backend="gloo",
init_method=f"file://{tmpfile.name}",
rank=0,
world_size=1,
)
try:
pg = dist.distributed_c10d._get_default_group()
x = torch.randn(3, 2)
ref, loaded = self._test_serialization("TENSOR_MATCH", foo, Inputs(x, pg))
self._test_check_fn(ref, loaded, {"inputs": Inputs(x, pg)}, True)
finally:
dist.destroy_process_group()
def test_unserializable_submodule(self):
def foo(mod, x):
return mod(x)
x = torch.randn(10, 10)
mod = GlobalNestedModule(ModuleNotSerializable())
ref, loaded = self._test_serialization("TENSOR_MATCH", foo, mod, x)
self._test_check_fn(ref, loaded, {"mod": mod, "x": x}, True)
def test_closure_var_missing(self):
captured = torch.randn(3, 2)
def bar(x):
return x + captured
def foo(f, x):
return f(x)
x = torch.randn(3, 2)
ref, loaded = self._test_serialization("TENSOR_MATCH", foo, bar, x)
self._test_check_fn(ref, loaded, {"f": bar, "x": x}, True)
def test_bound_method_patched_forward(self):
def forward(x):
return x + 1
m = FlatModule()
m_forward = m.forward
m.forward = forward
def foo(f, x):
assert callable(f)
return f(x)
x = torch.randn(3, 2)
ref, loaded = self._test_serialization("TYPE_MATCH", foo, m_forward, x)
self._test_check_fn(ref, loaded, {"f": m_forward, "x": x}, True)
def test_guard_on_key_order_with_cache(self):
def foo(x, mod):
for y in mod.d.values():
x *= y
return x
x = torch.randn(3, 2)
d = {"a": 1e9, "b": 1e-9}
ref, loaded = self._test_serialization(
"DICT_KEYS_MATCH", foo, x, ModWithDict(d)
)
self._test_check_fn(
ref, loaded, {"x": x, "d": ModWithDict({"b": 1e-9, "a": 1e9})}, False
)
def test_global_state_guard_filter(self):
def foo(x):
return x + 1
x = torch.randn(3, 2)
with torch.no_grad():
compiled_fn = torch.compile(
foo, options={"guard_filter_fn": torch.compiler.skip_all_guards_unsafe}
)
compiled_fn(x)
# Check global guards are gone.
with torch.enable_grad(), torch.compiler.set_stance("fail_on_recompile"):
self.assertEqual(compiled_fn(x), foo(x))
def test_torch_function_state_filter(self):
def foo(x):
return x + 1
x = torch.randn(3, 2)
with GlobalTorchFunctionMode():
compiled_fn = torch.compile(
foo, options={"guard_filter_fn": torch.compiler.skip_all_guards_unsafe}
)
compiled_fn(x)
# Check global guards are gone.
with torch.compiler.set_stance("fail_on_recompile"):
self.assertEqual(compiled_fn(x), foo(x))
|
TestGuardSerialization
|
python
|
getlogbook__logbook
|
src/logbook/queues.py
|
{
"start": 16109,
"end": 16767
}
|
class ____(Handler):
"""Implements a handler that dispatches over a queue to a different
process. It is connected to a subscriber with a
:class:`multiprocessing.Queue`::
from multiprocessing import Queue
from logbook.queues import MultiProcessingHandler
queue = Queue(-1)
handler = MultiProcessingHandler(queue)
"""
def __init__(self, queue, level=NOTSET, filter=None, bubble=False):
Handler.__init__(self, level, filter, bubble)
self.queue = queue
_fix_261_mplog()
def emit(self, record):
self.queue.put_nowait(record.to_dict(json_safe=True))
|
MultiProcessingHandler
|
python
|
doocs__leetcode
|
solution/0400-0499/0448.Find All Numbers Disappeared in an Array/Solution2.py
|
{
"start": 0,
"end": 256
}
|
class ____:
def findDisappearedNumbers(self, nums: List[int]) -> List[int]:
for x in nums:
i = abs(x) - 1
if nums[i] > 0:
nums[i] *= -1
return [i + 1 for i in range(len(nums)) if nums[i] > 0]
|
Solution
|
python
|
gevent__gevent
|
src/gevent/tests/test__threading_2.py
|
{
"start": 22680,
"end": 22779
}
|
class ____(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
@skipDueToHang
|
LockTests
|
python
|
aio-libs__aiohttp
|
aiohttp/client_exceptions.py
|
{
"start": 2804,
"end": 2893
}
|
class ____(ClientResponseError):
"""ContentType found is not valid."""
|
ContentTypeError
|
python
|
numba__numba
|
numba/cuda/tests/cudadrv/test_pinned.py
|
{
"start": 127,
"end": 944
}
|
class ____(ContextResettingTestCase):
def _run_copies(self, A):
A0 = np.copy(A)
stream = cuda.stream()
ptr = cuda.to_device(A, copy=False, stream=stream)
ptr.copy_to_device(A, stream=stream)
ptr.copy_to_host(A, stream=stream)
stream.synchronize()
self.assertTrue(np.allclose(A, A0))
def test_pinned(self):
machine = platform.machine()
if machine.startswith('arm') or machine.startswith('aarch64'):
count = 262144 # 2MB
else:
count = 2097152 # 16MB
A = np.arange(count)
with cuda.pinned(A):
self._run_copies(A)
def test_unpinned(self):
A = np.arange(2 * 1024 * 1024) # 16 MB
self._run_copies(A)
if __name__ == '__main__':
unittest.main()
|
TestPinned
|
python
|
django__django
|
tests/mutually_referential/tests.py
|
{
"start": 63,
"end": 541
}
|
class ____(TestCase):
def test_mutually_referential(self):
# Create a Parent
q = Parent(name="Elizabeth")
q.save()
# Create some children
c = q.child_set.create(name="Charles")
q.child_set.create(name="Edward")
# Set the best child
# No assertion require here; if basic assignment and
# deletion works, the test passes.
q.bestchild = c
q.save()
q.delete()
|
MutuallyReferentialTests
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeParams1.py
|
{
"start": 607,
"end": 1360
}
|
class ____:
def object[T](self, target: object, new: T) -> T: ...
# This should generate an error because T3 is duplicated.
def func3[T3, S1, T3](): ...
def func4[T4](T4: int): ...
def func5[T5](a: int):
# This should generate an error because T5 is already in use.
class ClassA[T5]: ...
# This should generate an error because T5 is already in use.
def inner_func1[T5](): ...
def func6[T6](T7: int):
class ClassA[T7]: ...
def inner_func1[T7](): ...
global T2
class ClassB[T2]:
global T2
class ClassC[T3]:
T3 = 4
T3 = 4
def func7[T8: ForwardRefClass[str], T9: "ForwardRefClass[int]"]():
pass
def func8[T10: (ForwardRefClass[str], "ForwardRefClass[int]")]():
pass
|
ClassH
|
python
|
huggingface__transformers
|
src/transformers/models/sew/modular_sew.py
|
{
"start": 1510,
"end": 1582
}
|
class ____(Wav2Vec2NoLayerNormConvLayer):
pass
|
SEWNoLayerNormConvLayer
|
python
|
facelessuser__pymdown-extensions
|
tests/test_extensions/test_blocks/test_legacy_tab.py
|
{
"start": 100,
"end": 1225
}
|
class ____(util.MdCase):
"""Test legacy tab slug cases."""
extension = ['pymdownx.blocks.tab', 'toc']
extension_configs = {
'pymdownx.blocks.tab': {'slugify': slugify(case='lower')}
}
MD = r"""
### Here is some text
/// tab | Here is some text
content
///
/// tab | Here is some text
content
///
"""
def test_tab_slugs(self):
"""Test tab slugs."""
self.check_markdown(
self.MD,
'''
<h3 id="here-is-some-text">Here is some text</h3>
<div class="tabbed-set" data-tabs="1:2"><input checked="checked" id="here-is-some-text_1" name="__tabbed_1" type="radio" /><label for="here-is-some-text_1">Here is some text</label><div class="tabbed-content">
<p>content</p>
</div>
<input id="here-is-some-text_2" name="__tabbed_1" type="radio" /><label for="here-is-some-text_2">Here is some text</label><div class="tabbed-content">
<p>content</p>
</div>
</div>
''', # noqa: E501
True
)
|
TestLegacyTabSlugs
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/marketing_platform/operators/campaign_manager.py
|
{
"start": 19097,
"end": 22872
}
|
class ____(BaseOperator):
"""
Updates existing conversions.
.. seealso::
Check official API docs:
`https://developers.google.com/doubleclick-advertisers/rest/v4/conversions/batchupdate`
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleCampaignManagerBatchUpdateConversionsOperator`
:param profile_id: User profile ID associated with this request.
:param conversions: Conversations to update, should be type of Conversion:
https://developers.google.com/doubleclick-advertisers/rest/v4/conversions
:param encryption_entity_type: The encryption entity type. This should match the encryption
configuration for ad serving or Data Transfer.
:param encryption_entity_id: The encryption entity ID. This should match the encryption
configuration for ad serving or Data Transfer.
:param encryption_source: Describes whether the encrypted cookie was received from ad serving
(the %m macro) or from Data Transfer.
:param max_failed_updates: The maximum number of conversions that failed to be updated
:param api_version: The version of the api that will be requested, for example 'v4'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"profile_id",
"conversions",
"encryption_entity_type",
"encryption_entity_id",
"encryption_source",
"impersonation_chain",
)
def __init__(
self,
*,
profile_id: str,
conversions: list[dict[str, Any]],
encryption_entity_type: str,
encryption_entity_id: int,
encryption_source: str,
max_failed_updates: int = 0,
api_version: str = "v4",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.profile_id = profile_id
self.conversions = conversions
self.encryption_entity_type = encryption_entity_type
self.encryption_entity_id = encryption_entity_id
self.encryption_source = encryption_source
self.max_failed_updates = max_failed_updates
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = GoogleCampaignManagerHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
response = hook.conversions_batch_update(
profile_id=self.profile_id,
conversions=self.conversions,
encryption_entity_type=self.encryption_entity_type,
encryption_entity_id=self.encryption_entity_id,
encryption_source=self.encryption_source,
max_failed_updates=self.max_failed_updates,
)
return response
|
GoogleCampaignManagerBatchUpdateConversionsOperator
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_relationships.py
|
{
"start": 159727,
"end": 168579
}
|
class ____(
_RelationshipErrors, fixtures.MappedTest
):
@classmethod
def define_tables(cls, metadata):
Table("foos", metadata, Column("id", Integer, primary_key=True))
Table(
"foobars", metadata, Column("fid", Integer), Column("bid", Integer)
)
Table("bars", metadata, Column("id", Integer, primary_key=True))
Table(
"foobars_with_fks",
metadata,
Column("fid", Integer, ForeignKey("foos.id")),
Column("bid", Integer, ForeignKey("bars.id")),
)
Table(
"foobars_with_many_columns",
metadata,
Column("fid", Integer),
Column("bid", Integer),
Column("fid1", Integer),
Column("bid1", Integer),
Column("fid2", Integer),
Column("bid2", Integer),
)
@classmethod
def setup_classes(cls):
class Foo(cls.Basic):
pass
class Bar(cls.Basic):
pass
def test_no_join(self):
foobars, bars, Foo, Bar, foos = (
self.tables.foobars,
self.tables.bars,
self.classes.Foo,
self.classes.Bar,
self.tables.foos,
)
self.mapper_registry.map_imperatively(
Foo,
foos,
properties={"bars": relationship(Bar, secondary=foobars)},
)
self.mapper_registry.map_imperatively(Bar, bars)
self._assert_raises_no_join(configure_mappers, "Foo.bars", "foobars")
def test_no_secondaryjoin(self):
foobars, bars, Foo, Bar, foos = (
self.tables.foobars,
self.tables.bars,
self.classes.Foo,
self.classes.Bar,
self.tables.foos,
)
self.mapper_registry.map_imperatively(
Foo,
foos,
properties={
"bars": relationship(
Bar,
secondary=foobars,
primaryjoin=foos.c.id > foobars.c.fid,
)
},
)
self.mapper_registry.map_imperatively(Bar, bars)
self._assert_raises_no_join(configure_mappers, "Foo.bars", "foobars")
def test_no_fks(self):
foobars_with_many_columns, bars, Bar, foobars, Foo, foos = (
self.tables.foobars_with_many_columns,
self.tables.bars,
self.classes.Bar,
self.tables.foobars,
self.classes.Foo,
self.tables.foos,
)
self.mapper_registry.map_imperatively(
Foo,
foos,
properties={
"bars": relationship(
Bar,
secondary=foobars,
primaryjoin=foos.c.id == foobars.c.fid,
secondaryjoin=foobars.c.bid == bars.c.id,
)
},
)
self.mapper_registry.map_imperatively(Bar, bars)
sa.orm.configure_mappers()
eq_(Foo.bars.property.synchronize_pairs, [(foos.c.id, foobars.c.fid)])
eq_(
Foo.bars.property.secondary_synchronize_pairs,
[(bars.c.id, foobars.c.bid)],
)
self.mapper_registry.dispose()
self.mapper_registry.map_imperatively(
Foo,
foos,
properties={
"bars": relationship(
Bar,
secondary=foobars_with_many_columns,
primaryjoin=foos.c.id == foobars_with_many_columns.c.fid,
secondaryjoin=foobars_with_many_columns.c.bid == bars.c.id,
)
},
)
self.mapper_registry.map_imperatively(Bar, bars)
sa.orm.configure_mappers()
eq_(
Foo.bars.property.synchronize_pairs,
[(foos.c.id, foobars_with_many_columns.c.fid)],
)
eq_(
Foo.bars.property.secondary_synchronize_pairs,
[(bars.c.id, foobars_with_many_columns.c.bid)],
)
def test_local_col_setup(self):
foobars_with_fks, bars, Bar, Foo, foos = (
self.tables.foobars_with_fks,
self.tables.bars,
self.classes.Bar,
self.classes.Foo,
self.tables.foos,
)
# ensure m2m backref is set up with correct annotations
# [ticket:2578]
self.mapper_registry.map_imperatively(
Foo,
foos,
properties={
"bars": relationship(
Bar, secondary=foobars_with_fks, backref="foos"
)
},
)
self.mapper_registry.map_imperatively(Bar, bars)
sa.orm.configure_mappers()
eq_(Foo.bars.property._join_condition.local_columns, {foos.c.id})
eq_(Bar.foos.property._join_condition.local_columns, {bars.c.id})
def test_bad_primaryjoin(self):
foobars_with_fks, bars, Bar, foobars, Foo, foos = (
self.tables.foobars_with_fks,
self.tables.bars,
self.classes.Bar,
self.tables.foobars,
self.classes.Foo,
self.tables.foos,
)
self.mapper_registry.map_imperatively(
Foo,
foos,
properties={
"bars": relationship(
Bar,
secondary=foobars,
primaryjoin=foos.c.id > foobars.c.fid,
secondaryjoin=foobars.c.bid <= bars.c.id,
)
},
)
self.mapper_registry.map_imperatively(Bar, bars)
self._assert_raises_no_equality(
configure_mappers, "foos.id > foobars.fid", "Foo.bars", "primary"
)
self.mapper_registry.dispose()
self.mapper_registry.map_imperatively(
Foo,
foos,
properties={
"bars": relationship(
Bar,
secondary=foobars_with_fks,
primaryjoin=foos.c.id > foobars_with_fks.c.fid,
secondaryjoin=foobars_with_fks.c.bid <= bars.c.id,
)
},
)
self.mapper_registry.map_imperatively(Bar, bars)
self._assert_raises_no_equality(
configure_mappers,
"foos.id > foobars_with_fks.fid",
"Foo.bars",
"primary",
)
self.mapper_registry.dispose()
self.mapper_registry.map_imperatively(
Foo,
foos,
properties={
"bars": relationship(
Bar,
secondary=foobars_with_fks,
primaryjoin=foos.c.id > foobars_with_fks.c.fid,
secondaryjoin=foobars_with_fks.c.bid <= bars.c.id,
viewonly=True,
)
},
)
self.mapper_registry.map_imperatively(Bar, bars)
sa.orm.configure_mappers()
def test_bad_secondaryjoin(self):
foobars, bars, Foo, Bar, foos = (
self.tables.foobars,
self.tables.bars,
self.classes.Foo,
self.classes.Bar,
self.tables.foos,
)
self.mapper_registry.map_imperatively(
Foo,
foos,
properties={
"bars": relationship(
Bar,
secondary=foobars,
primaryjoin=foos.c.id == foobars.c.fid,
secondaryjoin=foobars.c.bid <= bars.c.id,
foreign_keys=[foobars.c.fid],
)
},
)
self.mapper_registry.map_imperatively(Bar, bars)
self._assert_raises_no_relevant_fks(
configure_mappers,
"foobars.bid <= bars.id",
"Foo.bars",
"secondary",
)
def test_no_equated_secondaryjoin(self):
foobars, bars, Foo, Bar, foos = (
self.tables.foobars,
self.tables.bars,
self.classes.Foo,
self.classes.Bar,
self.tables.foos,
)
self.mapper_registry.map_imperatively(
Foo,
foos,
properties={
"bars": relationship(
Bar,
secondary=foobars,
primaryjoin=foos.c.id == foobars.c.fid,
secondaryjoin=foobars.c.bid <= bars.c.id,
foreign_keys=[foobars.c.fid, foobars.c.bid],
)
},
)
self.mapper_registry.map_imperatively(Bar, bars)
self._assert_raises_no_equality(
configure_mappers,
"foobars.bid <= bars.id",
"Foo.bars",
"secondary",
)
|
InvalidRelationshipEscalationTestM2M
|
python
|
streamlit__streamlit
|
lib/streamlit/elements/graphviz_chart.py
|
{
"start": 1456,
"end": 7941
}
|
class ____:
@gather_metrics("graphviz_chart")
def graphviz_chart(
self,
figure_or_dot: FigureOrDot,
use_container_width: bool | None = None,
*, # keyword-only arguments:
width: Width = "content",
height: Height = "content",
) -> DeltaGenerator:
"""Display a graph using the dagre-d3 library.
.. Important::
You must install ``graphviz>=0.19.0`` to use this command. You can
install all charting dependencies (except Bokeh) as an extra with
Streamlit:
.. code-block:: shell
pip install streamlit[charts]
Parameters
----------
figure_or_dot : graphviz.dot.Graph, graphviz.dot.Digraph, graphviz.sources.Source, str
The Graphlib graph object or dot string to display
use_container_width : bool
Whether to override the figure's native width with the width of
the parent container. If ``use_container_width`` is ``False``
(default), Streamlit sets the width of the chart to fit its contents
according to the plotting library, up to the width of the parent
container. If ``use_container_width`` is ``True``, Streamlit sets
the width of the figure to match the width of the parent container.
.. deprecated::
``use_container_width`` is deprecated and will be removed in a
future release. For ``use_container_width=True``, use
``width="stretch"``. For ``use_container_width=False``, use
``width="content"``.
width : "content", "stretch", or int
The width of the chart element. This can be one of the following:
- ``"content"`` (default): The width of the element matches the
width of its content, but doesn't exceed the width of the parent
container.
- ``"stretch"``: The width of the element matches the width of the
parent container.
- An integer specifying the width in pixels: The element has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the element matches the width
of the parent container.
height : "content", "stretch", or int
The height of the chart element. This can be one of the following:
- ``"content"`` (default): The height of the element matches the
height of its content.
- ``"stretch"``: The height of the element matches the height of
its content or the height of the parent container, whichever is
larger. If the element is not in a parent container, the height
of the element matches the height of its content.
- An integer specifying the height in pixels: The element has a
fixed height. If the content is larger than the specified
height, scrolling is enabled.
Example
-------
>>> import streamlit as st
>>> import graphviz
>>>
>>> # Create a graphlib graph object
>>> graph = graphviz.Digraph()
>>> graph.edge("run", "intr")
>>> graph.edge("intr", "runbl")
>>> graph.edge("runbl", "run")
>>> graph.edge("run", "kernel")
>>> graph.edge("kernel", "zombie")
>>> graph.edge("kernel", "sleep")
>>> graph.edge("kernel", "runmem")
>>> graph.edge("sleep", "swap")
>>> graph.edge("swap", "runswap")
>>> graph.edge("runswap", "new")
>>> graph.edge("runswap", "runmem")
>>> graph.edge("new", "runmem")
>>> graph.edge("sleep", "runmem")
>>>
>>> st.graphviz_chart(graph)
Or you can render the chart from the graph using GraphViz's Dot
language:
>>> st.graphviz_chart('''
digraph {
run -> intr
intr -> runbl
runbl -> run
run -> kernel
kernel -> zombie
kernel -> sleep
kernel -> runmem
sleep -> swap
swap -> runswap
runswap -> new
runswap -> runmem
new -> runmem
sleep -> runmem
}
''')
.. output::
https://doc-graphviz-chart.streamlit.app/
height: 600px
"""
if use_container_width is not None:
show_deprecation_warning(
make_deprecated_name_warning(
"use_container_width",
"width",
"2025-12-31",
"For `use_container_width=True`, use `width='stretch'`. "
"For `use_container_width=False`, use `width='content'`.",
include_st_prefix=False,
),
show_in_browser=False,
)
width = "stretch" if use_container_width else "content"
# Generate element ID from delta path
delta_path = self.dg._get_delta_path_str()
element_id = calc_md5(delta_path.encode())
graphviz_chart_proto = GraphVizChartProto()
marshall(graphviz_chart_proto, figure_or_dot, element_id)
# Validate and set layout configuration
validate_width(width, allow_content=True)
validate_height(height, allow_content=True)
layout_config = LayoutConfig(width=width, height=height)
return self.dg._enqueue(
"graphviz_chart", graphviz_chart_proto, layout_config=layout_config
)
@property
def dg(self) -> DeltaGenerator:
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self)
def marshall(
proto: GraphVizChartProto,
figure_or_dot: FigureOrDot,
element_id: str,
) -> None:
"""Construct a GraphViz chart object.
See DeltaGenerator.graphviz_chart for docs.
"""
if type_util.is_graphviz_chart(figure_or_dot):
dot = figure_or_dot.source
engine = figure_or_dot.engine
elif isinstance(figure_or_dot, str):
dot = figure_or_dot
engine = "dot"
else:
raise StreamlitAPIException(
f"Unhandled type for graphviz chart: {type(figure_or_dot)}"
)
proto.spec = dot
proto.engine = engine
proto.element_id = element_id
|
GraphvizMixin
|
python
|
openai__openai-python
|
src/openai/types/realtime/realtime_conversation_item_system_message.py
|
{
"start": 465,
"end": 1224
}
|
class ____(BaseModel):
content: List[Content]
"""The content of the message."""
role: Literal["system"]
"""The role of the message sender. Always `system`."""
type: Literal["message"]
"""The type of the item. Always `message`."""
id: Optional[str] = None
"""The unique ID of the item.
This may be provided by the client or generated by the server.
"""
object: Optional[Literal["realtime.item"]] = None
"""Identifier for the API object being returned - always `realtime.item`.
Optional when creating a new item.
"""
status: Optional[Literal["completed", "incomplete", "in_progress"]] = None
"""The status of the item. Has no effect on the conversation."""
|
RealtimeConversationItemSystemMessage
|
python
|
spack__spack
|
lib/spack/spack/traverse.py
|
{
"start": 1883,
"end": 2484
}
|
class ____:
"""A visitor that reverses the arrows in the DAG, following dependents."""
def __init__(self, visitor, depflag: dt.DepFlag = dt.ALL):
self.visitor = visitor
self.depflag = depflag
def accept(self, item):
return self.visitor.accept(item)
def neighbors(self, item):
"""Return dependents, note that we actually flip the edge direction to allow
generic programming"""
spec = item.edge.spec
return sort_edges(
[edge.flip() for edge in spec.edges_from_dependents(depflag=self.depflag)]
)
|
ReverseVisitor
|
python
|
eventlet__eventlet
|
tests/pools_test.py
|
{
"start": 6104,
"end": 6532
}
|
class ____(TestCase):
mode = 'static'
def setUp(self):
self.pool = IntPool(max_size=3, order_as_stack=True)
def test_ordering(self):
# items come out in the reverse order they are put
one, two = self.pool.get(), self.pool.get()
self.pool.put(one)
self.pool.put(two)
self.assertEqual(self.pool.get(), two)
self.assertEqual(self.pool.get(), one)
|
TestOrderAsStack
|
python
|
Textualize__textual
|
docs/examples/styles/width_comparison.py
|
{
"start": 240,
"end": 790
}
|
class ____(App):
CSS_PATH = "width_comparison.tcss"
def compose(self):
yield Horizontal(
Placeholder(id="cells"), # (1)!
Placeholder(id="percent"),
Placeholder(id="w"),
Placeholder(id="h"),
Placeholder(id="vw"),
Placeholder(id="vh"),
Placeholder(id="auto"),
Placeholder(id="fr1"),
Placeholder(id="fr3"),
)
yield Ruler()
if __name__ == "__main__":
app = WidthComparisonApp()
app.run()
|
WidthComparisonApp
|
python
|
plotly__plotly.py
|
plotly/graph_objs/image/legendgrouptitle/_font.py
|
{
"start": 233,
"end": 9916
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "image.legendgrouptitle"
_path_str = "image.legendgrouptitle.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.image.legendgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.image.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.image.legendgrouptitle.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Font
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/client/utils.py
|
{
"start": 2018,
"end": 2535
}
|
class ____(NamedTuple):
repository_location_name: str
repository_name: str
job_name: str
@staticmethod
def from_node(node: dict[str, Any]) -> list["JobInfo"]:
repo_name = node["name"]
repo_location_name = node["location"]["name"]
return [
JobInfo(
repository_location_name=repo_location_name,
repository_name=repo_name,
job_name=job["name"],
)
for job in node["pipelines"]
]
|
JobInfo
|
python
|
scipy__scipy
|
scipy/optimize/tests/test__shgo.py
|
{
"start": 7933,
"end": 8304
}
|
class ____(StructTestFunction):
def f(self, x):
if x[0] == 3.0 and x[1] == 3.0:
return 50
else:
return 100
g = None
cons = wrap_constraints(g)
test_table = StructTestTable(bounds=[(-10, 10), (-10, 10)],
expected_fun=[50],
expected_x=[3.0, 3.0])
|
StructTestTable
|
python
|
getsentry__sentry
|
src/sentry/notifications/notifications/organization_request/base.py
|
{
"start": 698,
"end": 2576
}
|
class ____(BaseNotification, abc.ABC):
notification_setting_type_enum = NotificationSettingEnum.APPROVAL
RoleBasedRecipientStrategyClass: type[RoleBasedRecipientStrategy]
def __init__(self, organization: Organization, requester: User) -> None:
super().__init__(organization)
self.requester = requester
self.role_based_recipient_strategy = self.RoleBasedRecipientStrategyClass(organization)
@property
def reference(self) -> Model | None:
return self.organization
def get_context(self) -> MutableMapping[str, Any]:
return {}
def determine_recipients(self) -> list[Actor]:
return Actor.many_from_object(self.role_based_recipient_strategy.determine_recipients())
def get_notification_title(
self, provider: ExternalProviders, context: Mapping[str, Any] | None = None
) -> str:
# purposely use empty string for the notification title
return ""
def build_notification_footer(self, recipient: Actor, provider: ExternalProviders) -> str:
if recipient.is_team:
raise NotImplementedError
settings_url = self.format_url(
text="Notification Settings",
url=self.get_settings_url(recipient, provider),
provider=provider,
)
return self.role_based_recipient_strategy.build_notification_footer_from_settings_url(
settings_url
)
def get_title_link(self, recipient: Actor, provider: ExternalProviders) -> str | None:
return None
def get_log_params(self, recipient: Actor) -> MutableMapping[str, Any]:
if recipient.is_team:
raise NotImplementedError
return {
**super().get_log_params(recipient),
"user_id": self.requester.id,
"target_user_id": recipient.id,
}
|
OrganizationRequestNotification
|
python
|
milvus-io__pymilvus
|
pymilvus/client/asynch.py
|
{
"start": 1451,
"end": 4978
}
|
class ____(AbstractFuture):
def __init__(
self,
future: Any,
done_callback: Optional[Callable] = None,
pre_exception: Optional[Callable] = None,
**kwargs,
) -> None:
self._future = future
# keep compatible (such as Future(future, done_callback)), deprecated later
self._done_cb = done_callback
self._done_cb_list = []
self.add_callback(done_callback)
self._condition = threading.Condition()
self._canceled = False
self._done = False
self._response = None
self._results = None
self._exception = pre_exception
self._callback_called = False # callback function should be called only once
self._kwargs = kwargs
def add_callback(self, func: Callable):
self._done_cb_list.append(func)
def __del__(self) -> None:
self._future = None
@abc.abstractmethod
def on_response(self, response: Callable):
"""Parse response from gRPC server and return results."""
raise NotImplementedError
def _callback(self):
if not self._callback_called:
for cb in self._done_cb_list:
if cb:
# necessary to check parameter signature of cb?
if isinstance(self._results, tuple):
cb(*self._results)
elif _parameter_is_empty(cb):
cb()
elif self._results is not None:
cb(self._results)
else:
raise MilvusException(message="callback function is not legal!")
self._callback_called = True
def result(self, **kwargs):
self.exception()
with self._condition:
# future not finished. wait callback being called.
to = kwargs.get("timeout")
if to is None:
to = self._kwargs.get("timeout", None)
if self._future and self._results is None:
try:
self._response = self._future.result(timeout=to)
except Exception as e:
raise MilvusException(message=str(e)) from e
self._results = self.on_response(self._response)
self._callback()
self._done = True
self._condition.notify_all()
self.exception()
if kwargs.get("raw", False) is True:
# just return response object received from gRPC
return self._response
if self._results:
return self._results
return self.on_response(self._response)
def cancel(self):
with self._condition:
if self._future:
self._future.cancel()
self._condition.notify_all()
def is_done(self):
return self._done
def done(self):
with self._condition:
if self._future and self._results is None:
try:
self._response = self._future.result()
self._results = self.on_response(self._response)
self._callback() # https://github.com/milvus-io/milvus/issues/6160
except Exception as e:
self._exception = e
self._done = True
self._condition.notify_all()
def exception(self):
if self._exception:
raise self._exception
if self._future:
self._future.exception()
|
Future
|
python
|
vyperlang__vyper
|
vyper/venom/analysis/dfg.py
|
{
"start": 325,
"end": 3898
}
|
class ____(IRAnalysis):
_dfg_inputs: dict[IRVariable, OrderedSet[IRInstruction]]
_dfg_outputs: dict[IRVariable, IRInstruction]
def __init__(self, analyses_cache: IRAnalysesCache, function: IRFunction):
super().__init__(analyses_cache, function)
self._dfg_inputs = dict()
self._dfg_outputs = dict()
# return uses of a given variable
def get_uses(self, op: IRVariable) -> OrderedSet[IRInstruction]:
return self._dfg_inputs.get(op, OrderedSet())
def get_uses_in_bb(self, op: IRVariable, bb: IRBasicBlock):
"""
Get uses of a given variable in a specific basic block.
"""
return [inst for inst in self.get_uses(op) if inst.parent == bb]
# the instruction which produces this variable.
def get_producing_instruction(self, op: IROperand) -> Optional[IRInstruction]:
if not isinstance(op, IRVariable):
return None
return self._dfg_outputs.get(op)
def set_producing_instruction(self, op: IRVariable, inst: IRInstruction):
# should this check if inst.output is already in dfg_outputs?
self._dfg_outputs[op] = inst
def remove_producing_instruction(self, op: IRVariable):
del self._dfg_outputs[op]
def add_use(self, op: IRVariable, inst: IRInstruction):
uses = self._dfg_inputs.setdefault(op, OrderedSet())
uses.add(inst)
def remove_use(self, op: IRVariable, inst: IRInstruction):
uses: OrderedSet = self._dfg_inputs.get(op, OrderedSet())
uses.remove(inst)
def are_equivalent(self, var1: IROperand, var2: IROperand) -> bool:
if var1 == var2:
return True
if isinstance(var1, IRVariable) and isinstance(var2, IRVariable):
var1 = self._traverse_assign_chain(var1)
var2 = self._traverse_assign_chain(var2)
return var1 == var2
def _traverse_assign_chain(self, var: IRVariable) -> IRVariable:
while True:
inst = self.get_producing_instruction(var)
if inst is None or inst.opcode != "assign":
return var
var = inst.operands[0] # type: ignore
@property
def outputs(self) -> dict[IRVariable, IRInstruction]:
return self._dfg_outputs
def analyze(self):
# Build DFG
# %15 = add %13 %14
# %16 = iszero %15
# dfg_outputs of %15 is (%15 = add %13 %14)
# dfg_inputs of %15 is all the instructions which *use* %15, ex. [(%16 = iszero %15), ...]
for bb in self.function.get_basic_blocks():
for inst in bb.instructions:
operands = inst.get_input_variables()
res = inst.get_outputs()
for op in operands:
inputs = self._dfg_inputs.setdefault(op, OrderedSet())
inputs.add(inst)
for op in res:
self._dfg_outputs[op] = inst
def as_graph(self) -> str:
"""
Generate a graphviz representation of the dfg
"""
lines = ["digraph dfg_graph {"]
for var, inputs in self._dfg_inputs.items():
for input in inputs:
for op in input.get_outputs():
lines.append(f' " {var.name} " -> " {op.name} "')
lines.append("}")
return "\n".join(lines)
def invalidate(self):
self.analyses_cache.invalidate_analysis(LivenessAnalysis)
del self._dfg_inputs
del self._dfg_outputs
def __repr__(self) -> str:
return self.as_graph()
|
DFGAnalysis
|
python
|
huggingface__transformers
|
src/transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py
|
{
"start": 62644,
"end": 62795
}
|
class ____(Qwen3MoePreTrainedModel):
config_class = Qwen3OmniMoeTextConfig
config = Qwen3OmniMoeTextConfig
|
Qwen3OmniMoeThinkerTextPreTrainedModel
|
python
|
pdm-project__pdm
|
src/pdm/cli/commands/run.py
|
{
"start": 4247,
"end": 18735
}
|
class ____:
"""The task runner for pdm project"""
TYPES = ("cmd", "shell", "call", "composite")
OPTIONS = ("env", "env_file", "help", "keep_going", "working_dir", "site_packages")
def __init__(self, project: Project, hooks: HookManager) -> None:
self.project = project
global_options = cast(
"TaskOptions",
self.project.scripts.get("_", {}) if self.project.scripts else {},
)
self.global_options = global_options.copy()
self.recreate_env = False
self.hooks = hooks
def _get_script_env(self, script_file: str) -> BaseEnvironment:
import hashlib
from pdm.cli.commands.venv.backends import BACKENDS
from pdm.environments import PythonEnvironment
from pdm.installers.core import install_requirements
from pdm.models.venv import get_venv_python
with open(script_file, encoding="utf8") as f:
metadata = read_script_metadata(f.read(), "script")
if not metadata:
return self.project.environment
tool_config = metadata.pop("tool", {})
script_project = self.project.core.create_project()
script_project.pyproject.set_data(
{"project": {"name": "temp-project", **metadata, "version": "0.0.0"}, "tool": tool_config}
)
venv_name = hashlib.md5(os.path.realpath(script_file).encode("utf-8"), usedforsecurity=False).hexdigest()
venv_backend = BACKENDS[script_project.config["venv.backend"]](script_project, None)
venv = venv_backend.get_location(None, venv_name)
with contextlib.ExitStack() as stack:
if venv.exists() and not self.recreate_env:
stack.enter_context(self.project.core.ui.open_spinner("[info]Reusing existing script environment"))
else:
stack.enter_context(self.project.core.ui.open_spinner("[info]Creating environment for script"))
venv = venv_backend.create(venv_name=venv_name, force=True)
self.project.core.ui.info(f"Script environment location: {venv}", verbosity=termui.Verbosity.DETAIL)
env = PythonEnvironment(script_project, python=get_venv_python(venv).as_posix())
script_project._python = env.interpreter
env.project = script_project # keep a strong reference to the project
if reqs := script_project.get_dependencies():
install_requirements(reqs, env, clean=True)
return env
def get_task(self, script_name: str) -> Task | None:
"""Get the task with the given name. Return None if not found."""
if script_name not in self.project.scripts:
return None
script = cast("str | Sequence[str] | Mapping[str,Any]", self.project.scripts[script_name])
if not isinstance(script, Mapping):
# Regard as the same as {cmd = ... }
kind = "cmd"
value = script
options = {}
else:
script = dict(script) # to remove the effect of tomlkit's container type.
for key in self.TYPES:
if key in script:
kind = key
value = cast("str | Sequence[str]", script.pop(key))
break
else:
raise PdmUsageError(f"Script type must be one of ({', '.join(self.TYPES)})")
options = script.copy()
unknown_options = set(options) - set(self.OPTIONS)
if unknown_options:
raise PdmUsageError(f"Unknown options for task {script_name}: {', '.join(unknown_options)}")
return Task(kind, script_name, value, cast("TaskOptions", options))
def expand_command(self, env: BaseEnvironment, command: str) -> str:
expanded_command = os.path.expanduser(command)
if expanded_command.replace(os.sep, "/").startswith(("./", "../")):
abspath = os.path.abspath(expanded_command)
if not os.path.isfile(abspath):
raise PdmUsageError(f"Command [success]'{command}'[/] is not a valid executable.")
return abspath
result = env.which(command)
if not result:
raise PdmUsageError(f"Command [success]'{command}'[/] is not found in your PATH.")
return result
def _run_process(
self,
args: Sequence[str] | str,
chdir: bool = False,
shell: bool = False,
site_packages: bool = False,
env: Mapping[str, str] | None = None,
env_file: EnvFileOptions | str | None = None,
working_dir: str | None = None,
) -> int:
"""Run command in a subprocess and return the exit code."""
import dotenv
from dotenv.main import resolve_variables
project = self.project
if not shell and args[0].endswith(".py"):
project_env = self._get_script_env(os.path.expanduser(args[0]))
else:
check_project_file(project)
project_env = project.environment
this_path = project_env.get_paths()["scripts"]
os.environ.update(project_env.process_env)
if env_file is not None:
if isinstance(env_file, str):
path = env_file
override = False
else:
path = env_file["override"]
override = True
project.core.ui.echo(
f"Loading .env file: [success]{env_file}[/]",
err=True,
verbosity=termui.Verbosity.DETAIL,
)
dotenv.load_dotenv(self.project.root / path, override=override)
if env:
os.environ.update(resolve_variables(env.items(), override=True))
if shell:
assert isinstance(args, str)
# environment variables will be expanded by shell
process_cmd: str | Sequence[str] = args
else:
assert isinstance(args, Sequence)
command, *args = (expand_env_vars(arg) for arg in args)
if command.endswith(".py"):
args = [command, *args]
command = str(project_env.interpreter.executable)
expanded_command = self.expand_command(project_env, command)
real_command = os.path.realpath(expanded_command)
process_cmd = [expanded_command, *args]
if (
project_env.is_local
and not site_packages
and (
os.path.basename(real_command).startswith("python")
or is_path_relative_to(expanded_command, this_path)
)
):
# The executable belongs to the local packages directory.
# Don't load system site-packages
os.environ["NO_SITE_PACKAGES"] = "1"
cwd = (project.root / working_dir) if working_dir else project.root if chdir else None
def forward_signal(signum: int, frame: FrameType | None) -> None:
if sys.platform == "win32" and signum == signal.SIGINT:
signum = signal.SIGTERM
process.send_signal(signum)
process_env = os.environ.copy()
process_env.update({"PDM_RUN_CWD": str(Path.cwd())})
handle_term = signal.signal(signal.SIGTERM, forward_signal)
handle_int = signal.signal(signal.SIGINT, forward_signal)
process = subprocess.Popen(process_cmd, cwd=cwd, shell=shell, bufsize=0, close_fds=False, env=process_env)
retcode = process.wait()
signal.signal(signal.SIGTERM, handle_term)
signal.signal(signal.SIGINT, handle_int)
return retcode
def run_task(
self, task: Task, args: Sequence[str] = (), opts: TaskOptions | None = None, seen: set[str] | None = None
) -> int:
"""Run the named task with the given arguments.
Args:
task: The task to run
args: The extra arguments passed to the task
opts: The options passed from parent if any
seen: The set of seen tasks to prevent recursive calls
"""
kind, _, value, options = task
shell = False
if kind == "cmd":
if isinstance(value, str):
cmd, interpolated = interpolate(value, args)
value = shlex.split(cmd)
else:
agg = [interpolate(part, args) for part in value]
interpolated = any(row[1] for row in agg)
# In case of multiple default, we need to split the resulting string.
parts: Iterator[list[str]] = (
shlex.split(part) if interpolated else [part] for part, interpolated in agg
)
# We flatten the nested list to obtain a list of arguments
value = list(itertools.chain(*parts))
args = value if interpolated else [*value, *args]
elif kind == "shell":
assert isinstance(value, str)
script, interpolated = interpolate(value, args)
args = script if interpolated else " ".join([script, *args])
shell = True
elif kind == "call":
assert isinstance(value, str)
module, _, func = value.partition(":")
if not module or not func:
raise PdmUsageError("Python callable must be in the form <module_name>:<callable_name>")
short_name = "_1"
if re.search(r"\(.*?\)", func) is None:
func += "()"
args = ["python", "-c", f"import sys, {module} as {short_name};sys.exit({short_name}.{func})", *list(args)]
elif kind == "composite":
assert isinstance(value, list)
self.display_task(task, args)
if kind == "composite":
args = list(args)
should_interpolate = any(RE_ARGS_PLACEHOLDER.search(script) for script in value)
should_interpolate = should_interpolate or any(RE_PDM_PLACEHOLDER.search(script) for script in value)
composite_code = 0
keep_going = options.pop("keep_going", False) if options else False
for script in value:
if should_interpolate:
script, _ = interpolate(script, args)
split = shlex.split(script)
cmd = split[0]
subargs = split[1:] + ([] if should_interpolate else args)
code = self.run(cmd, subargs, merge_options(options, opts), chdir=True, seen=seen)
if code != 0:
if not keep_going:
return code
composite_code = code
return composite_code
return self._run_process(args, chdir=True, shell=shell, **merge_options(self.global_options, options, opts)) # type: ignore[misc]
def display_task(self, task: Task, args: Sequence[str]) -> None:
"""Display a task given current verbosity and settings"""
is_verbose = self.project.core.ui.verbosity >= termui.Verbosity.DETAIL
if not (is_verbose or self.project.config.get("scripts.show_header")):
return
args = task.args if task.kind == "composite" else args
content = args if is_verbose else task.short_description
self.project.core.ui.echo(
f"Running {task}: [success]{content}[/]",
err=True,
verbosity=termui.Verbosity.NORMAL,
)
def run(
self,
command: str,
args: list[str],
opts: TaskOptions | None = None,
chdir: bool = False,
seen: set[str] | None = None,
) -> int:
"""Run a command or script with the given arguments."""
if command in self.hooks.skip:
return 0
if seen is None:
seen = set()
task = self.get_task(command)
if task is not None:
if task.kind == "composite":
if command in seen:
raise PdmUsageError(f"Script {command} is recursive.")
seen = {command, *seen}
self.hooks.try_emit("pre_script", script=command, args=args)
pre_task = self.get_task(f"pre_{command}")
if pre_task is not None and self.hooks.should_run(pre_task.name):
code = self.run_task(pre_task, opts=opts)
if code != 0:
return code
code = self.run_task(task, args, opts=opts, seen=seen)
if code != 0:
return code
post_task = self.get_task(f"post_{command}")
if post_task is not None and self.hooks.should_run(post_task.name):
code = self.run_task(post_task, opts=opts)
self.hooks.try_emit("post_script", script=command, args=args)
return code
else:
return self._run_process([command, *args], chdir=chdir, **merge_options(self.global_options, opts)) # type: ignore[misc]
def show_list(self) -> None:
if not self.project.scripts:
return
columns = ["Name", "Type", "Description"]
result = []
for name in sorted(self.project.scripts):
if name.startswith("_"):
continue
task = self.get_task(name)
assert task is not None
result.append(
(
f"[success]{name}[/]",
task.kind,
task.short_description,
)
)
self.project.core.ui.display_columns(result, columns)
def as_json(self) -> dict[str, Any]:
out = {}
for name in sorted(self.project.scripts):
if name == "_":
data = out["_"] = dict(name="_", kind="shared", help="Shared options", **self.global_options)
_fix_env_file(data)
continue
task = self.get_task(name)
assert task is not None
data = out[name] = {
"name": name,
"kind": task.kind,
"help": task.short_description,
"args": task.args, # type: ignore[dict-item]
}
data.update(**task.options)
_fix_env_file(data)
return out
def _fix_env_file(data: dict[str, Any]) -> dict[str, Any]:
env_file = data.get("env_file")
if isinstance(env_file, dict):
del data["env_file"]
data["env_file.override"] = env_file.get("override")
return data
|
TaskRunner
|
python
|
pyinstaller__pyinstaller
|
tests/functional/modules/pyi_testmod_relimp/F/__init__.py
|
{
"start": 540,
"end": 585
}
|
class ____:
name = 'pyi_testmod_relimp.F.H'
|
H
|
python
|
matplotlib__matplotlib
|
lib/matplotlib/testing/jpl_units/Epoch.py
|
{
"start": 157,
"end": 6100
}
|
class ____:
# Frame conversion offsets in seconds
# t(TO) = t(FROM) + allowed[ FROM ][ TO ]
allowed = {
"ET": {
"UTC": +64.1839,
},
"UTC": {
"ET": -64.1839,
},
}
def __init__(self, frame, sec=None, jd=None, daynum=None, dt=None):
"""
Create a new Epoch object.
Build an epoch 1 of 2 ways:
Using seconds past a Julian date:
# Epoch('ET', sec=1e8, jd=2451545)
or using a matplotlib day number
# Epoch('ET', daynum=730119.5)
= ERROR CONDITIONS
- If the input units are not in the allowed list, an error is thrown.
= INPUT VARIABLES
- frame The frame of the epoch. Must be 'ET' or 'UTC'
- sec The number of seconds past the input JD.
- jd The Julian date of the epoch.
- daynum The matplotlib day number of the epoch.
- dt A python datetime instance.
"""
if ((sec is None and jd is not None) or
(sec is not None and jd is None) or
(daynum is not None and
(sec is not None or jd is not None)) or
(daynum is None and dt is None and
(sec is None or jd is None)) or
(daynum is not None and dt is not None) or
(dt is not None and (sec is not None or jd is not None)) or
((dt is not None) and not isinstance(dt, DT.datetime))):
raise ValueError(
"Invalid inputs. Must enter sec and jd together, "
"daynum by itself, or dt (must be a python datetime).\n"
"Sec = %s\n"
"JD = %s\n"
"dnum= %s\n"
"dt = %s" % (sec, jd, daynum, dt))
_api.check_in_list(self.allowed, frame=frame)
self._frame = frame
if dt is not None:
daynum = date2num(dt)
if daynum is not None:
# 1-JAN-0001 in JD = 1721425.5
jd = float(daynum) + 1721425.5
self._jd = math.floor(jd)
self._seconds = (jd - self._jd) * 86400.0
else:
self._seconds = float(sec)
self._jd = float(jd)
# Resolve seconds down to [ 0, 86400)
deltaDays = math.floor(self._seconds / 86400)
self._jd += deltaDays
self._seconds -= deltaDays * 86400.0
def convert(self, frame):
if self._frame == frame:
return self
offset = self.allowed[self._frame][frame]
return Epoch(frame, self._seconds + offset, self._jd)
def frame(self):
return self._frame
def julianDate(self, frame):
t = self
if frame != self._frame:
t = self.convert(frame)
return t._jd + t._seconds / 86400.0
def secondsPast(self, frame, jd):
t = self
if frame != self._frame:
t = self.convert(frame)
delta = t._jd - jd
return t._seconds + delta * 86400
def _cmp(self, op, rhs):
"""Compare Epochs *self* and *rhs* using operator *op*."""
t = self
if self._frame != rhs._frame:
t = self.convert(rhs._frame)
if t._jd != rhs._jd:
return op(t._jd, rhs._jd)
return op(t._seconds, rhs._seconds)
__eq__ = functools.partialmethod(_cmp, operator.eq)
__ne__ = functools.partialmethod(_cmp, operator.ne)
__lt__ = functools.partialmethod(_cmp, operator.lt)
__le__ = functools.partialmethod(_cmp, operator.le)
__gt__ = functools.partialmethod(_cmp, operator.gt)
__ge__ = functools.partialmethod(_cmp, operator.ge)
def __add__(self, rhs):
"""
Add a duration to an Epoch.
= INPUT VARIABLES
- rhs The Epoch to subtract.
= RETURN VALUE
- Returns the difference of ourselves and the input Epoch.
"""
t = self
if self._frame != rhs.frame():
t = self.convert(rhs._frame)
sec = t._seconds + rhs.seconds()
return Epoch(t._frame, sec, t._jd)
def __sub__(self, rhs):
"""
Subtract two Epoch's or a Duration from an Epoch.
Valid:
Duration = Epoch - Epoch
Epoch = Epoch - Duration
= INPUT VARIABLES
- rhs The Epoch to subtract.
= RETURN VALUE
- Returns either the duration between to Epoch's or the a new
Epoch that is the result of subtracting a duration from an epoch.
"""
# Delay-load due to circular dependencies.
import matplotlib.testing.jpl_units as U
# Handle Epoch - Duration
if isinstance(rhs, U.Duration):
return self + -rhs
t = self
if self._frame != rhs._frame:
t = self.convert(rhs._frame)
days = t._jd - rhs._jd
sec = t._seconds - rhs._seconds
return U.Duration(rhs._frame, days*86400 + sec)
def __str__(self):
"""Print the Epoch."""
return f"{self.julianDate(self._frame):22.15e} {self._frame}"
def __repr__(self):
"""Print the Epoch."""
return str(self)
@staticmethod
def range(start, stop, step):
"""
Generate a range of Epoch objects.
Similar to the Python range() method. Returns the range [
start, stop) at the requested step. Each element will be a
Epoch object.
= INPUT VARIABLES
- start The starting value of the range.
- stop The stop value of the range.
- step Step to use.
= RETURN VALUE
- Returns a list containing the requested Epoch values.
"""
elems = []
i = 0
while True:
d = start + i * step
if d >= stop:
break
elems.append(d)
i += 1
return elems
|
Epoch
|
python
|
numba__numba
|
numba/tests/test_listimpl.py
|
{
"start": 5294,
"end": 5811
}
|
class ____(object):
"""An iterator for the `List`.
"""
def __init__(self, parent):
self.parent = parent
itsize = self.parent.tc.numba_list_iter_sizeof()
self.it_state_buf = (ctypes.c_char_p * itsize)(0)
self.it = ctypes.cast(self.it_state_buf, ctypes.c_void_p)
self.parent.list_iter(self.it)
def __iter__(self):
return self
def __next__(self):
return self.parent.list_iter_next(self.it)
next = __next__ # needed for py2 only
|
ListIter
|
python
|
pytorch__pytorch
|
benchmarks/tensorexpr/normalization.py
|
{
"start": 1508,
"end": 1796
}
|
class ____(NormalizationBench):
def forward(self):
y = self.instance_norm(self.data)
return y
@staticmethod
def module():
return "instance_norm"
def is_supported(self):
return tensor_engine.is_supported(self.instance_norm)
|
InstanceNormBench
|
python
|
pola-rs__polars
|
py-polars/tests/unit/constructors/test_constructors.py
|
{
"start": 1695,
"end": 1775
}
|
class ____(pydantic.BaseModel):
d: datetime
e: float
f: str
|
_TestBazPD
|
python
|
doocs__leetcode
|
solution/2100-2199/2174.Remove All Ones With Row and Column Flips II/Solution.py
|
{
"start": 0,
"end": 967
}
|
class ____:
def removeOnes(self, grid: List[List[int]]) -> int:
m, n = len(grid), len(grid[0])
state = sum(1 << (i * n + j) for i in range(m) for j in range(n) if grid[i][j])
q = deque([state])
vis = {state}
ans = 0
while q:
for _ in range(len(q)):
state = q.popleft()
if state == 0:
return ans
for i in range(m):
for j in range(n):
if grid[i][j] == 0:
continue
nxt = state
for r in range(m):
nxt &= ~(1 << (r * n + j))
for c in range(n):
nxt &= ~(1 << (i * n + c))
if nxt not in vis:
vis.add(nxt)
q.append(nxt)
ans += 1
return -1
|
Solution
|
python
|
ray-project__ray
|
python/ray/experimental/channel/conftest.py
|
{
"start": 392,
"end": 2148
}
|
class ____:
"""
Barrier that blocks the given number of actors until all actors have
reached the barrier. This is used to mock out blocking NCCL ops.
"""
def __init__(self, num_actors=2):
self.num_actors = num_actors
self.condition = asyncio.Condition()
# Buffer for the data that is "sent" between the actors, each entry is
# one p2p op.
self.data = {}
# Buffer for the number of actors seen, each entry is one p2p op.
self.num_actors_seen = defaultdict(int)
# Add a new mock for the TorchTensorType.device property
device_property_patcher = mock.patch(
"ray.experimental.channel.torch_tensor_type.TorchTensorType.device",
new_callable=mock.PropertyMock,
return_value=Device.CPU,
)
device_property_patcher.start()
async def wait(self, idx: int, data=None):
"""
Wait at barrier until all actors have sent `idx`. One actor should
provide `data`, and this value will be returned by this method for all
other actors.
"""
async with self.condition:
if data is not None:
assert idx not in self.data, (self.data, self.num_actors_seen)
self.data[idx] = data
self.num_actors_seen[idx] += 1
if self.num_actors_seen[idx] == self.num_actors:
# Wake up all tasks waiting on this condition.
self.condition.notify_all()
else:
await self.condition.wait_for(
lambda: self.num_actors_seen[idx] == self.num_actors
)
if data is None:
data = self.data[idx]
return data
|
Barrier
|
python
|
pandas-dev__pandas
|
pandas/core/interchange/dataframe_protocol.py
|
{
"start": 404,
"end": 621
}
|
class ____(enum.IntEnum):
"""Integer enum for device type codes matching DLPack."""
CPU = 1
CUDA = 2
CPU_PINNED = 3
OPENCL = 4
VULKAN = 7
METAL = 8
VPI = 9
ROCM = 10
|
DlpackDeviceType
|
python
|
kamyu104__LeetCode-Solutions
|
Python/design-sql.py
|
{
"start": 293,
"end": 1212
}
|
class ____(object):
def __init__(self, names, columns):
"""
:type names: List[str]
:type columns: List[int]
"""
self.__table = {name:[column] for name, column in itertools.izip(names, columns)}
def insertRow(self, name, row):
"""
:type name: str
:type row: List[str]
:rtype: None
"""
row.append("") # soft delete
self.__table[name].append(row)
def deleteRow(self, name, rowId):
"""
:type name: str
:type rowId: int
:rtype: None
"""
self.__table[name][rowId][-1] = "deleted" # soft delete
def selectCell(self, name, rowId, columnId):
"""
:type name: str
:type rowId: int
:type columnId: int
:rtype: str
"""
return self.__table[name][rowId][columnId-1] if self.__table[name][rowId][-1] == "" else ""
|
SQL
|
python
|
doocs__leetcode
|
solution/1900-1999/1986.Minimum Number of Work Sessions to Finish the Tasks/Solution.py
|
{
"start": 0,
"end": 525
}
|
class ____:
def minSessions(self, tasks: List[int], sessionTime: int) -> int:
n = len(tasks)
ok = [False] * (1 << n)
for i in range(1, 1 << n):
t = sum(tasks[j] for j in range(n) if i >> j & 1)
ok[i] = t <= sessionTime
f = [inf] * (1 << n)
f[0] = 0
for i in range(1, 1 << n):
j = i
while j:
if ok[j]:
f[i] = min(f[i], f[i ^ j] + 1)
j = (j - 1) & i
return f[-1]
|
Solution
|
python
|
pandas-dev__pandas
|
pandas/tests/dtypes/test_dtypes.py
|
{
"start": 7875,
"end": 14454
}
|
class ____(Base):
@pytest.fixture
def dtype(self):
"""
Class level fixture of dtype for TestDatetimeTZDtype
"""
return DatetimeTZDtype("ns", "US/Eastern")
def test_alias_to_unit_raises(self):
# 23990
with pytest.raises(ValueError, match="Passing a dtype alias"):
DatetimeTZDtype("datetime64[ns, US/Central]")
def test_alias_to_unit_bad_alias_raises(self):
# 23990
with pytest.raises(
TypeError, match="Cannot construct a 'DatetimeTZDtype' from"
):
DatetimeTZDtype("this is a bad string")
with pytest.raises(
TypeError, match="Cannot construct a 'DatetimeTZDtype' from"
):
DatetimeTZDtype("datetime64[ns, US/NotATZ]")
def test_hash_vs_equality(self, dtype):
# make sure that we satisfy is semantics
dtype2 = DatetimeTZDtype("ns", "US/Eastern")
dtype3 = DatetimeTZDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype4 = DatetimeTZDtype("ns", "US/Central")
assert dtype2 != dtype4
assert hash(dtype2) != hash(dtype4)
def test_construction_non_nanosecond(self):
res = DatetimeTZDtype("ms", "US/Eastern")
assert res.unit == "ms"
assert res._creso == NpyDatetimeUnit.NPY_FR_ms.value
assert res.str == "|M8[ms]"
assert str(res) == "datetime64[ms, US/Eastern]"
assert res.base == np.dtype("M8[ms]")
def test_day_not_supported(self):
msg = "DatetimeTZDtype only supports s, ms, us, ns units"
with pytest.raises(ValueError, match=msg):
DatetimeTZDtype("D", "US/Eastern")
def test_subclass(self):
a = DatetimeTZDtype.construct_from_string("datetime64[ns, US/Eastern]")
b = DatetimeTZDtype.construct_from_string("datetime64[ns, CET]")
assert issubclass(type(a), type(b))
def test_compat(self, dtype):
msg = "is_datetime64tz_dtype is deprecated"
with tm.assert_produces_warning(Pandas4Warning, match=msg):
assert is_datetime64tz_dtype(dtype)
assert is_datetime64tz_dtype("datetime64[ns, US/Eastern]")
assert is_datetime64_any_dtype(dtype)
assert is_datetime64_any_dtype("datetime64[ns, US/Eastern]")
assert is_datetime64_ns_dtype(dtype)
assert is_datetime64_ns_dtype("datetime64[ns, US/Eastern]")
assert not is_datetime64_dtype(dtype)
assert not is_datetime64_dtype("datetime64[ns, US/Eastern]")
def test_construction_from_string(self, dtype):
result = DatetimeTZDtype.construct_from_string("datetime64[ns, US/Eastern]")
assert is_dtype_equal(dtype, result)
@pytest.mark.parametrize(
"string",
[
"foo",
"datetime64[ns, notatz]",
# non-nano unit
"datetime64[ps, UTC]",
# dateutil str that returns None from gettz
"datetime64[ns, dateutil/invalid]",
],
)
def test_construct_from_string_invalid_raises(self, string):
msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'"
with pytest.raises(TypeError, match=re.escape(msg)):
DatetimeTZDtype.construct_from_string(string)
def test_construct_from_string_wrong_type_raises(self):
msg = "'construct_from_string' expects a string, got <class 'list'>"
with pytest.raises(TypeError, match=msg):
DatetimeTZDtype.construct_from_string(["datetime64[ns, notatz]"])
    def test_is_dtype(self, dtype):
        # is_dtype accepts instances and both string spellings
        # ("datetime64[...]" and "M8[...]"); rejects None, junk, numpy dtypes.
        assert not DatetimeTZDtype.is_dtype(None)
        assert DatetimeTZDtype.is_dtype(dtype)
        assert DatetimeTZDtype.is_dtype("datetime64[ns, US/Eastern]")
        assert DatetimeTZDtype.is_dtype("M8[ns, US/Eastern]")
        assert not DatetimeTZDtype.is_dtype("foo")
        assert DatetimeTZDtype.is_dtype(DatetimeTZDtype("ns", "US/Pacific"))
        assert not DatetimeTZDtype.is_dtype(np.float64)
    def test_equality(self, dtype):
        # Equality holds across string spellings and equivalent instances;
        # differing timezone means unequal dtypes.
        assert is_dtype_equal(dtype, "datetime64[ns, US/Eastern]")
        assert is_dtype_equal(dtype, "M8[ns, US/Eastern]")
        assert is_dtype_equal(dtype, DatetimeTZDtype("ns", "US/Eastern"))
        assert not is_dtype_equal(dtype, "foo")
        assert not is_dtype_equal(dtype, DatetimeTZDtype("ns", "CET"))
        assert not is_dtype_equal(
            DatetimeTZDtype("ns", "US/Eastern"), DatetimeTZDtype("ns", "US/Pacific")
        )

        # numpy compat
        assert is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]")
        assert dtype == "M8[ns, US/Eastern]"
    def test_basic(self, dtype):
        # NOTE(review): test_compat above asserts Pandas4Warning for the same
        # deprecation message, while this test expects DeprecationWarning --
        # confirm which warning class is intended.
        msg = "is_datetime64tz_dtype is deprecated"
        with tm.assert_produces_warning(DeprecationWarning, match=msg):
            assert is_datetime64tz_dtype(dtype)

        dr = date_range("20130101", periods=3, tz="US/Eastern")
        s = Series(dr, name="A")

        # dtypes
        with tm.assert_produces_warning(DeprecationWarning, match=msg):
            assert is_datetime64tz_dtype(s.dtype)
            assert is_datetime64tz_dtype(s)
            assert not is_datetime64tz_dtype(np.dtype("float64"))
            assert not is_datetime64tz_dtype(1.0)
    def test_dst(self):
        # Ranges on opposite sides of the DST transition (January vs August)
        # still yield the same tz-aware dtype.
        dr1 = date_range("2013-01-01", periods=3, tz="US/Eastern")
        s1 = Series(dr1, name="A")
        assert isinstance(s1.dtype, DatetimeTZDtype)

        dr2 = date_range("2013-08-01", periods=3, tz="US/Eastern")
        s2 = Series(dr2, name="A")
        assert isinstance(s2.dtype, DatetimeTZDtype)
        assert s1.dtype == s2.dtype
@pytest.mark.parametrize("tz", ["UTC", "US/Eastern"])
@pytest.mark.parametrize("constructor", ["M8", "datetime64"])
def test_parser(self, tz, constructor):
# pr #11245
dtz_str = f"{constructor}[ns, {tz}]"
result = DatetimeTZDtype.construct_from_string(dtz_str)
expected = DatetimeTZDtype("ns", tz)
assert result == expected
    def test_empty(self):
        # Timezone is mandatory; constructing without one raises TypeError.
        with pytest.raises(TypeError, match="A 'tz' is required."):
            DatetimeTZDtype()
    def test_tz_standardize(self):
        # GH 24713: a pytz timezone taken from the index or from a single
        # Timestamp normalizes to the same dtype.tz.
        pytz = pytest.importorskip("pytz")
        tz = pytz.timezone("US/Eastern")
        dr = date_range("2013-01-01", periods=3, tz=tz)
        dtype = DatetimeTZDtype("ns", dr.tz)
        assert dtype.tz == tz
        dtype = DatetimeTZDtype("ns", dr[0].tz)
        assert dtype.tz == tz
|
TestDatetimeTZDtype
|
python
|
astropy__astropy
|
astropy/modeling/utils.py
|
{
"start": 7335,
"end": 10255
}
|
class ____(UserDict):
    """
    Wrapper around UserDict to allow for better tracking of the Special
    Operators for CompoundModels. This dictionary is structured so that
    one cannot inadvertently overwrite an existing special operator.

    Parameters
    ----------
    unique_id: int
        the last used unique_id for a SPECIAL OPERATOR
    special_operators: dict or None
        a dictionary containing the special_operators; ``None`` (the
        default) means start with a fresh empty dictionary

    Notes
    -----
    Direct setting of operators (`dict[key] = value`) into the
    dictionary has been deprecated in favor of the `.add(name, value)`
    method, so that unique dictionary keys can be generated and tracked
    consistently.
    """

    def __init__(self, unique_id=0, special_operators=None):
        # Fix: the original signature used a mutable default argument
        # (``special_operators={}``), so every instance created without an
        # explicit mapping shared -- and mutated -- the same dict object.
        # A ``None`` sentinel preserves the call interface while giving each
        # instance its own storage.
        super().__init__({} if special_operators is None else special_operators)
        self._unique_id = unique_id

    def _set_value(self, key, val):
        # Refuse to overwrite: each special-operator key may be bound once.
        if key in self:
            raise ValueError(f'Special operator "{key}" already exists')
        else:
            super().__setitem__(key, val)

    def __setitem__(self, key, val):
        # Still performs the assignment (for backwards compatibility) but
        # steers callers toward ``.add``, which generates a tracked unique key.
        self._set_value(key, val)
        warnings.warn(
            DeprecationWarning(
                """
            Special operator dictionary assignment has been deprecated.
            Please use `.add` instead, so that you can capture a unique
            key for your operator.
            """
            )
        )

    def _get_unique_id(self):
        # Monotonically increasing counter; ids are never reused.
        self._unique_id += 1

        return self._unique_id

    def add(self, operator_name, operator):
        """
        Adds a special operator to the dictionary, and then returns the
        unique key that the operator is stored under for later reference.

        Parameters
        ----------
        operator_name: str
            the name for the operator
        operator: function
            the actual operator function which will be used

        Returns
        -------
        the unique operator key for the dictionary
            `(operator_name, unique_id)`
        """
        key = (operator_name, self._get_unique_id())

        self._set_value(key, operator)

        return key
DType = TypeVar("DType", bound=np.generic)


@overload
def quantity_asanyarray(a: Sequence[int]) -> NDArray[np.integer]: ...


@overload
def quantity_asanyarray(a: Sequence[int], dtype: DType) -> NDArray[DType]: ...


@overload
def quantity_asanyarray(a: Sequence[u.Quantity]) -> u.Quantity: ...


def quantity_asanyarray(
    a: Sequence[int] | Sequence[u.Quantity], dtype: DType | None = None
) -> NDArray[np.integer] | NDArray[DType] | u.Quantity:
    """Convert *a* to an array, preserving Quantity-ness.

    If *a* is a non-array, non-scalar sequence containing at least one
    `~astropy.units.Quantity`, the result is a single Quantity built from
    the sequence; otherwise it defers to `numpy.asanyarray`.
    """
    if (
        not isinstance(a, np.ndarray)
        and not np.isscalar(a)
        and any(isinstance(x, u.Quantity) for x in a)
    ):
        return u.Quantity(a, dtype=dtype)
    else:
        # skip over some dtype deprecation.
        # np.inexact is an abstract dtype; substitute concrete float64 so
        # np.asanyarray does not hit the abstract-dtype deprecation path.
        dtype = np.float64 if dtype is np.inexact else dtype
        return np.asanyarray(a, dtype=dtype)
|
_SpecialOperatorsDict
|
python
|
celery__celery
|
celery/app/routes.py
|
{
"start": 1528,
"end": 4551
}
|
class ____:
    """Route tasks based on the :setting:`task_routes` setting."""

    def __init__(self, routes=None, queues=None,
                 create_missing=False, app=None):
        # queues: mapping of queue name -> Queue instance.
        # routes: ordered list of routers, consulted first-match-wins.
        self.app = app
        self.queues = {} if queues is None else queues
        self.routes = [] if routes is None else routes
        self.create_missing = create_missing

    def route(self, options, name, args=(), kwargs=None, task_type=None):
        """Return final message options for task *name*.

        A matching custom route takes precedence, but the explicitly passed
        *options* override it via ``lpmerge``; when no queue is resolved at
        all, the app's ``task_default_queue`` is merged in as a fallback.
        """
        kwargs = {} if not kwargs else kwargs
        options = self.expand_destination(options)  # expands 'queue'
        if self.routes:
            route = self.lookup_route(name, args, kwargs, options, task_type)
            if route:  # expands 'queue' in route.
                return lpmerge(self.expand_destination(route), options)
        if 'queue' not in options:
            options = lpmerge(self.expand_destination(
                self.app.conf.task_default_queue), options)
        return options

    def expand_destination(self, route):
        """Normalize *route* into an options dict with a resolved 'queue'.

        Raises :exc:`QueueNotFound` when the named queue is not declared in
        ``self.queues``.
        """
        # Route can be a queue name: convenient for direct exchanges.
        if isinstance(route, str):
            queue, route = route, {}
        else:
            # can use defaults from configured queue, but override specific
            # things (like the routing_key): great for topic exchanges.
            queue = route.pop('queue', None)

        if queue:
            if isinstance(queue, Queue):
                route['queue'] = queue
            else:
                try:
                    route['queue'] = self.queues[queue]
                except KeyError:
                    raise QueueNotFound(
                        f'Queue {queue!r} missing from task_queues')
        return route

    def lookup_route(self, name,
                     args=None, kwargs=None, options=None, task_type=None):
        """Return the first non-None route produced by the configured routers."""
        query = self.query_router
        for router in self.routes:
            route = query(router, name, args, kwargs, options, task_type)
            if route is not None:
                return route

    def query_router(self, router, task, args, kwargs, options, task_type):
        """Ask a single router for a route, supporting pre-4.0 router classes."""
        router = maybe_evaluate(router)

        if hasattr(router, 'route_for_task'):
            # pre 4.0 router class
            return router.route_for_task(task, args, kwargs)
        return router(task, args, kwargs, options, task=task_type)
def expand_router_string(router):
    """Import a router given by qualified name, instantiating legacy classes."""
    router = symbol_by_name(router)
    if hasattr(router, 'route_for_task'):
        # need to instantiate pre 4.0 router classes
        router = router()
    return router
def prepare(routes):
    """Expand the :setting:`task_routes` setting.

    Accepts a single route or a list/tuple of them.  Mappings, lists and
    tuples become :class:`MapRoute` instances; strings become lazily
    imported routers.  Returns an empty tuple when *routes* is None.
    """
    def expand_route(route):
        # Normalize one entry of the setting into a router object.
        if isinstance(route, (Mapping, list, tuple)):
            return MapRoute(route)
        if isinstance(route, str):
            # Defer the import until the router is actually queried.
            return mlazy(expand_router_string, route)
        return route

    if routes is None:
        return ()
    if not isinstance(routes, (list, tuple)):
        routes = (routes,)
    return [expand_route(route) for route in routes]
|
Router
|
python
|
pallets__flask
|
src/flask/json/tag.py
|
{
"start": 5599,
"end": 5911
}
|
class ____(JSONTag):
    """Tag for ``datetime`` values, serialized via HTTP date strings."""

    __slots__ = ()
    # Single-character-ish tag key used in the serialized JSON envelope.
    key = " d"

    def check(self, value: t.Any) -> bool:
        # Matches any datetime instance.
        return isinstance(value, datetime)

    def to_json(self, value: t.Any) -> t.Any:
        return http_date(value)

    def to_python(self, value: t.Any) -> t.Any:
        return parse_date(value)
|
TagDateTime
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/core/test_indexing.py
|
{
"start": 944,
"end": 18868
}
|
class ____(TestCase):
def test_index_no_floats(self):
a = np.array([[[5]]])
assert_raises(IndexError, lambda: a[0.0])
assert_raises(IndexError, lambda: a[0, 0.0])
assert_raises(IndexError, lambda: a[0.0, 0])
assert_raises(IndexError, lambda: a[0.0, :])
assert_raises(IndexError, lambda: a[:, 0.0])
assert_raises(IndexError, lambda: a[:, 0.0, :])
assert_raises(IndexError, lambda: a[0.0, :, :])
assert_raises(IndexError, lambda: a[0, 0, 0.0])
assert_raises(IndexError, lambda: a[0.0, 0, 0])
assert_raises(IndexError, lambda: a[0, 0.0, 0])
assert_raises(IndexError, lambda: a[-1.4])
assert_raises(IndexError, lambda: a[0, -1.4])
assert_raises(IndexError, lambda: a[-1.4, 0])
assert_raises(IndexError, lambda: a[-1.4, :])
assert_raises(IndexError, lambda: a[:, -1.4])
assert_raises(IndexError, lambda: a[:, -1.4, :])
assert_raises(IndexError, lambda: a[-1.4, :, :])
assert_raises(IndexError, lambda: a[0, 0, -1.4])
assert_raises(IndexError, lambda: a[-1.4, 0, 0])
assert_raises(IndexError, lambda: a[0, -1.4, 0])
# Note torch validates index arguments "depth-first", so will prioritise
# raising TypeError over IndexError, e.g.
#
# >>> a = np.array([[[5]]])
# >>> a[0.0:, 0.0]
# IndexError: only integers, slices (`:`), ellipsis (`...`),
# numpy.newaxis # (`None`) and integer or boolean arrays are
# valid indices
# >>> t = torch.as_tensor([[[5]]]) # identical to a
# >>> t[0.0:, 0.0]
# TypeError: slice indices must be integers or None or have an
# __index__ method
#
assert_raises((IndexError, TypeError), lambda: a[0.0:, 0.0])
assert_raises((IndexError, TypeError), lambda: a[0.0:, 0.0, :])
def test_slicing_no_floats(self):
a = np.array([[5]])
# start as float.
assert_raises(TypeError, lambda: a[0.0:])
assert_raises(TypeError, lambda: a[0:, 0.0:2])
assert_raises(TypeError, lambda: a[0.0::2, :0])
assert_raises(TypeError, lambda: a[0.0:1:2, :])
assert_raises(TypeError, lambda: a[:, 0.0:])
# stop as float.
assert_raises(TypeError, lambda: a[:0.0])
assert_raises(TypeError, lambda: a[:0, 1:2.0])
assert_raises(TypeError, lambda: a[:0.0:2, :0])
assert_raises(TypeError, lambda: a[:0.0, :])
assert_raises(TypeError, lambda: a[:, 0:4.0:2])
# step as float.
assert_raises(TypeError, lambda: a[::1.0])
assert_raises(TypeError, lambda: a[0:, :2:2.0])
assert_raises(TypeError, lambda: a[1::4.0, :0])
assert_raises(TypeError, lambda: a[::5.0, :])
assert_raises(TypeError, lambda: a[:, 0:4:2.0])
# mixed.
assert_raises(TypeError, lambda: a[1.0:2:2.0])
assert_raises(TypeError, lambda: a[1.0::2.0])
assert_raises(TypeError, lambda: a[0:, :2.0:2.0])
assert_raises(TypeError, lambda: a[1.0:1:4.0, :0])
assert_raises(TypeError, lambda: a[1.0:5.0:5.0, :])
assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0])
# should still get the DeprecationWarning if step = 0.
assert_raises(TypeError, lambda: a[::0.0])
@skip(reason="torch allows slicing with non-0d array components")
def test_index_no_array_to_index(self):
# No non-scalar arrays.
a = np.array([[[1]]])
assert_raises(TypeError, lambda: a[a:a:a])
# Conversely, using scalars doesn't raise in NumPy, e.g.
#
# >>> i = np.int64(1)
# >>> a[i:i:i]
# array([], shape=(0, 1, 1), dtype=int64)
#
def test_none_index(self):
# `None` index adds newaxis
a = np.array([1, 2, 3])
assert_equal(a[None], a[np.newaxis])
assert_equal(a[None].ndim, a.ndim + 1)
@skip
def test_empty_tuple_index(self):
# Empty tuple index creates a view
a = np.array([1, 2, 3])
assert_equal(a[()], a)
assert_(a[()].tensor._base is a.tensor)
a = np.array(0)
assert_(isinstance(a[()], np.int_))
def test_same_kind_index_casting(self):
# Indexes should be cast with same-kind and not safe, even if that
# is somewhat unsafe. So test various different code paths.
index = np.arange(5)
u_index = index.astype(np.uint8) # i.e. cast to default uint indexing dtype
arr = np.arange(10)
assert_array_equal(arr[index], arr[u_index])
arr[u_index] = np.arange(5)
assert_array_equal(arr, np.arange(10))
arr = np.arange(10).reshape(5, 2)
assert_array_equal(arr[index], arr[u_index])
arr[u_index] = np.arange(5)[:, None]
assert_array_equal(arr, np.arange(5)[:, None].repeat(2, axis=1))
arr = np.arange(25).reshape(5, 5)
assert_array_equal(arr[u_index, u_index], arr[index, index])
def test_empty_fancy_index(self):
# Empty list index creates an empty array
# with the same dtype (but with weird shape)
a = np.array([1, 2, 3])
assert_equal(a[[]], [])
assert_equal(a[[]].dtype, a.dtype)
b = np.array([], dtype=np.intp)
assert_equal(a[[]], [])
assert_equal(a[[]].dtype, a.dtype)
b = np.array([])
assert_raises(IndexError, a.__getitem__, b)
def test_ellipsis_index(self):
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_(a[...] is not a)
assert_equal(a[...], a)
# `a[...]` was `a` in numpy <1.9.
# Slicing with ellipsis can skip an
# arbitrary number of dimensions
assert_equal(a[0, ...], a[0])
assert_equal(a[0, ...], a[0, :])
assert_equal(a[..., 0], a[:, 0])
# Slicing with ellipsis always results
# in an array, not a scalar
assert_equal(a[0, ..., 1], np.array(2))
# Assignment with `(Ellipsis,)` on 0-d arrays
b = np.array(1)
b[(Ellipsis,)] = 2
assert_equal(b, 2)
@xpassIfTorchDynamo_np # 'torch_.np.array() does not have base attribute.
def test_ellipsis_index_2(self):
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_(a[...] is not a)
assert_equal(a[...], a)
# `a[...]` was `a` in numpy <1.9.
assert_(a[...].base is a)
def test_single_int_index(self):
# Single integer index selects one row
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_equal(a[0], [1, 2, 3])
assert_equal(a[-1], [7, 8, 9])
# Index out of bounds produces IndexError
assert_raises(IndexError, a.__getitem__, 1 << 30)
# Index overflow produces IndexError
# Note torch raises RuntimeError here
assert_raises((IndexError, ValueError), a.__getitem__, 1 << 64)
def test_single_bool_index(self):
# Single boolean index
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_equal(a[np.array(True)], a[None])
assert_equal(a[np.array(False)], a[None][0:0])
def test_boolean_shape_mismatch(self):
arr = np.ones((5, 4, 3))
index = np.array([True])
assert_raises(IndexError, arr.__getitem__, index)
index = np.array([False] * 6)
assert_raises(IndexError, arr.__getitem__, index)
index = np.zeros((4, 4), dtype=bool)
assert_raises(IndexError, arr.__getitem__, index)
assert_raises(IndexError, arr.__getitem__, (slice(None), index))
def test_boolean_indexing_onedim(self):
# Indexing a 2-dimensional array with
# boolean array of length one
a = np.array([[0.0, 0.0, 0.0]])
b = np.array([True], dtype=bool)
assert_equal(a[b], a)
# boolean assignment
a[b] = 1.0
assert_equal(a, [[1.0, 1.0, 1.0]])
@skip(reason="NP_VER: fails on CI")
def test_boolean_assignment_value_mismatch(self):
# A boolean assignment should fail when the shape of the values
# cannot be broadcast to the subscription. (see also gh-3458)
a = np.arange(4)
def f(a, v):
a[a > -1] = v
assert_raises((RuntimeError, ValueError, TypeError), f, a, [])
assert_raises((RuntimeError, ValueError, TypeError), f, a, [1, 2, 3])
assert_raises((RuntimeError, ValueError, TypeError), f, a[:1], [1, 2, 3])
def test_boolean_indexing_twodim(self):
# Indexing a 2-dimensional array with
# 2-dimensional boolean array
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
b = np.array([[True, False, True], [False, True, False], [True, False, True]])
assert_equal(a[b], [1, 3, 5, 7, 9])
assert_equal(a[b[1]], [[4, 5, 6]])
assert_equal(a[b[0]], a[b[2]])
# boolean assignment
a[b] = 0
assert_equal(a, [[0, 2, 0], [4, 0, 6], [0, 8, 0]])
def test_boolean_indexing_list(self):
# Regression test for #13715. It's a use-after-free bug which the
# test won't directly catch, but it will show up in valgrind.
a = np.array([1, 2, 3])
b = [True, False, True]
# Two variants of the test because the first takes a fast path
assert_equal(a[b], [1, 3])
assert_equal(a[None, b], [[1, 3]])
def test_reverse_strides_and_subspace_bufferinit(self):
# This tests that the strides are not reversed for simple and
# subspace fancy indexing.
a = np.ones(5)
b = np.zeros(5, dtype=np.intp)[::-1]
c = np.arange(5)[::-1]
a[b] = c
# If the strides are not reversed, the 0 in the arange comes last.
assert_equal(a[0], 0)
# This also tests that the subspace buffer is initialized:
a = np.ones((5, 2))
c = np.arange(10).reshape(5, 2)[::-1]
a[b, :] = c
assert_equal(a[0], [0, 1])
def test_reversed_strides_result_allocation(self):
# Test a bug when calculating the output strides for a result array
# when the subspace size was 1 (and test other cases as well)
a = np.arange(10)[:, None]
i = np.arange(10)[::-1]
assert_array_equal(a[i], a[i.copy("C")])
a = np.arange(20).reshape(-1, 2)
def test_uncontiguous_subspace_assignment(self):
# During development there was a bug activating a skip logic
# based on ndim instead of size.
a = np.full((3, 4, 2), -1)
b = np.full((3, 4, 2), -1)
a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T
b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy()
assert_equal(a, b)
@skip(reason="torch does not limit dims to 32")
def test_too_many_fancy_indices_special_case(self):
# Just documents behaviour, this is a small limitation.
a = np.ones((1,) * 32) # 32 is NPY_MAXDIMS
assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 32)
def test_scalar_array_bool(self):
# NumPy bools can be used as boolean index (python ones as of yet not)
a = np.array(1)
assert_equal(a[np.bool_(True)], a[np.array(True)])
assert_equal(a[np.bool_(False)], a[np.array(False)])
# After deprecating bools as integers:
# a = np.array([0,1,2])
# assert_equal(a[True, :], a[None, :])
# assert_equal(a[:, True], a[:, None])
#
# assert_(not np.may_share_memory(a, a[True, :]))
def test_everything_returns_views(self):
# Before `...` would return a itself.
a = np.arange(5)
assert_(a is not a[()])
assert_(a is not a[...])
assert_(a is not a[:])
def test_broaderrors_indexing(self):
a = np.zeros((5, 5))
assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2]))
assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0)
def test_trivial_fancy_out_of_bounds(self):
a = np.zeros(5)
ind = np.ones(20, dtype=np.intp)
ind[-1] = 10
assert_raises(IndexError, a.__getitem__, ind)
assert_raises((IndexError, RuntimeError), a.__setitem__, ind, 0)
ind = np.ones(20, dtype=np.intp)
ind[0] = 11
assert_raises(IndexError, a.__getitem__, ind)
assert_raises((IndexError, RuntimeError), a.__setitem__, ind, 0)
def test_trivial_fancy_not_possible(self):
# Test that the fast path for trivial assignment is not incorrectly
# used when the index is not contiguous or 1D, see also gh-11467.
a = np.arange(6)
idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0]
assert_array_equal(a[idx], idx)
# this case must not go into the fast path, note that idx is
# a non-contiguous none 1D array here.
a[idx] = -1
res = np.arange(6)
res[0] = -1
res[3] = -1
assert_array_equal(a, res)
def test_memory_order(self):
# This is not necessary to preserve. Memory layouts for
# more complex indices are not as simple.
a = np.arange(10)
b = np.arange(10).reshape(5, 2).T
assert_(a[b].flags.f_contiguous)
# Takes a different implementation branch:
a = a.reshape(-1, 1)
assert_(a[b, 0].flags.f_contiguous)
@skipIfTorchDynamo() # XXX: flaky, depends on implementation details
def test_small_regressions(self):
# Reference count of intp for index checks
a = np.array([0])
if HAS_REFCOUNT:
refcount = sys.getrefcount(np.dtype(np.intp))
# item setting always checks indices in separate function:
a[np.array([0], dtype=np.intp)] = 1
a[np.array([0], dtype=np.uint8)] = 1
assert_raises(IndexError, a.__setitem__, np.array([1], dtype=np.intp), 1)
assert_raises(IndexError, a.__setitem__, np.array([1], dtype=np.uint8), 1)
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount)
def test_tuple_subclass(self):
arr = np.ones((5, 5))
# A tuple subclass should also be an nd-index
class TupleSubclass(tuple):
__slots__ = ()
index = ([1], [1])
index = TupleSubclass(index)
assert_(arr[index].shape == (1,))
# Unlike the non nd-index:
assert_(arr[index,].shape != (1,))
@xpassIfTorchDynamo_np # (reason="XXX: low-prio behaviour to support")
def test_broken_sequence_not_nd_index(self):
# See https://github.com/numpy/numpy/issues/5063
# If we have an object which claims to be a sequence, but fails
# on item getting, this should not be converted to an nd-index (tuple)
# If this object happens to be a valid index otherwise, it should work
# This object here is very dubious and probably bad though:
class SequenceLike:
def __index__(self):
return 0
def __len__(self):
return 1
def __getitem__(self, item):
raise IndexError("Not possible")
arr = np.arange(10)
assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
# also test that field indexing does not segfault
# for a similar reason, by indexing a structured array
arr = np.zeros((1,), dtype=[("f1", "i8"), ("f2", "i8")])
assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])
def test_indexing_array_weird_strides(self):
# See also gh-6221
# the shapes used here come from the issue and create the correct
# size for the iterator buffering size.
x = np.ones(10)
x2 = np.ones((10, 2))
ind = np.arange(10)[:, None, None, None]
ind = np.broadcast_to(ind, (10, 55, 4, 4))
# single advanced index case
assert_array_equal(x[ind], x[ind.copy()])
# higher dimensional advanced index
zind = np.zeros(4, dtype=np.intp)
assert_array_equal(x2[ind, zind], x2[ind.copy(), zind])
def test_indexing_array_negative_strides(self):
# From gh-8264,
# core dumps if negative strides are used in iteration
arro = np.zeros((4, 4)) # codespell:ignore
arr = arro[::-1, ::-1] # codespell:ignore
slices = (slice(None), [0, 1, 2, 3])
arr[slices] = 10
assert_array_equal(arr, 10.0)
@parametrize("index", [True, False, np.array([0])])
@parametrize("num", [32, 40])
@parametrize("original_ndim", [1, 32])
def test_too_many_advanced_indices(self, index, num, original_ndim):
# These are limitations based on the number of arguments we can process.
# For `num=32` (and all boolean cases), the result is actually define;
# but the use of NpyIter (NPY_MAXARGS) limits it for technical reasons.
if not (isinstance(index, np.ndarray) and original_ndim < num):
# unskipped cases fail because of assigning too many indices
raise SkipTest("torch does not limit dims to 32")
arr = np.ones((1,) * original_ndim)
with pytest.raises(IndexError):
arr[(index,) * num]
with pytest.raises(IndexError):
arr[(index,) * num] = 1.0
def test_nontuple_ndindex(self):
a = np.arange(25).reshape((5, 5))
assert_equal(a[[0, 1]], np.array([a[0], a[1]]))
assert_equal(a[[0, 1], [0, 1]], np.array([0, 6]))
raise SkipTest(
"torch happily consumes non-tuple sequences with multi-axis "
"indices (i.e. slices) as an index, whereas NumPy invalidates "
"them, assumedly to keep things simple. This invalidation "
"behaviour is just too niche to bother emulating."
)
assert_raises(IndexError, a.__getitem__, [slice(None)])
@instantiate_parametrized_tests
|
TestIndexing
|
python
|
scipy__scipy
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_S.py
|
{
"start": 36850,
"end": 38092
}
|
class ____(Benchmark):
    r"""
    Stochastic objective function.

    This class defines the Stochastic [1]_ global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\text{Stochastic}}(x) = \sum_{i=1}^{n} \epsilon_i
        \left | {x_i - \frac{1}{i}} \right |

    The variable :math:`\epsilon_i, (i=1,...,n)` is a random variable uniformly
    distributed in :math:`[0, 1]`.

    Here, :math:`n` represents the number of dimensions and
    :math:`x_i \in [-5, 5]` for :math:`i = 1, ..., n`.

    *Global optimum*: :math:`f(x) = 0` for :math:`x_i = [1/n]` for
    :math:`i = 1, ..., n`

    .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
    """

    change_dimensionality = True

    def __init__(self, dimensions=2):
        Benchmark.__init__(self, dimensions)

        self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))

        self.global_optimum = [[1.0 / _ for _ in range(1, self.N + 1)]]
        self.fglob = 0.0

    def fun(self, x, *args):
        # Track function-evaluation count for the benchmark harness.
        self.nfev += 1

        # Fresh uniform noise on every call: the objective is stochastic by
        # design, so repeated evaluations at the same x differ.
        rnd = uniform(0.0, 1.0, size=(self.N, ))
        i = arange(1, self.N + 1)
        return sum(rnd * abs(x - 1.0 / i))
|
Stochastic
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py
|
{
"start": 2153,
"end": 2666
}
|
class ____:
    # "Good" fixture for rule PYI034: each dunder below uses the recommended
    # signature (``Self`` for __new__/__enter__/__aenter__/__ior__ returns,
    # ``object``-typed parameters for __eq__/__ne__), so no diagnostics
    # should be emitted on this class.
    def __new__(cls: type[Self], *args: Any, **kwargs: Any) -> Self:
        ...

    @abstractmethod
    def __str__(self) -> str:
        ...

    @abc.abstractmethod
    def __repr__(self) -> str:
        ...

    def __eq__(self, other: object) -> bool:
        ...

    def __ne__(self, obj: object) -> int:
        ...

    def __enter__(self: Self) -> Self:
        ...

    async def __aenter__(self: Self) -> Self:
        ...

    def __ior__(self: Self, other: Self) -> Self:
        ...
|
Good
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 1480566,
"end": 1494370
}
|
class ____(TopLevelSpec):
"""
TopLevelUnitSpec schema wrapper.
Parameters
----------
data : dict, :class:`Data`, :class:`UrlData`, :class:`Generator`, :class:`NamedData`, :class:`DataSource`, :class:`InlineData`, :class:`SphereGenerator`, :class:`SequenceGenerator`, :class:`GraticuleGenerator`, None
An object describing the data source. Set to ``null`` to ignore the parent's data
source. If no data is set, it is derived from the parent.
mark : dict, :class:`Mark`, :class:`AnyMark`, :class:`BoxPlot`, :class:`MarkDef`, :class:`ErrorBar`, :class:`ErrorBand`, :class:`BoxPlotDef`, :class:`ErrorBarDef`, :class:`ErrorBandDef`, :class:`CompositeMark`, :class:`CompositeMarkDef`, Literal['arc', 'area', 'bar', 'image', 'line', 'point', 'rect', 'rule', 'text', 'tick', 'trail', 'circle', 'square', 'geoshape', 'boxplot', 'errorband', 'errorbar']
A string describing the mark type (one of ``"bar"``, ``"circle"``, ``"square"``,
``"tick"``, ``"line"``, ``"area"``, ``"point"``, ``"rule"``, ``"geoshape"``, and
``"text"``) or a `mark definition object
<https://vega.github.io/vega-lite/docs/mark.html#mark-def>`__.
align : dict, :class:`LayoutAlign`, :class:`RowColLayoutAlign`, Literal['all', 'each', 'none']
The alignment to apply to grid rows and columns. The supported string values are
``"all"``, ``"each"``, and ``"none"``.
* For ``"none"``, a flow layout will be used, in which adjacent subviews are simply
placed one after the other.
* For ``"each"``, subviews will be aligned into a clean grid structure, but each row
or column may be of variable size.
* For ``"all"``, subviews will be aligned and each row or column will be sized
identically based on the maximum observed size. String values for this property
will be applied to both grid rows and columns.
Alternatively, an object value of the form ``{"row": string, "column": string}`` can
be used to supply different alignments for rows and columns.
**Default value:** ``"all"``.
autosize : dict, :class:`AutosizeType`, :class:`AutoSizeParams`, Literal['pad', 'none', 'fit', 'fit-x', 'fit-y']
How the visualization size should be determined. If a string, should be one of
``"pad"``, ``"fit"`` or ``"none"``. Object values can additionally specify
parameters for content sizing and automatic resizing.
**Default value**: ``pad``
background : str, dict, :class:`Color`, :class:`ExprRef`, :class:`HexColor`, :class:`ColorName`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 
'yellowgreen', 'rebeccapurple']
CSS color property to use as the background of the entire view.
**Default value:** ``"white"``
bounds : Literal['full', 'flush']
The bounds calculation method to use for determining the extent of a sub-plot. One
of ``full`` (the default) or ``flush``.
* If set to ``full``, the entire calculated bounds (including axes, title, and
legend) will be used.
* If set to ``flush``, only the specified width and height values for the sub-view
will be used. The ``flush`` setting can be useful when attempting to place
sub-plots without axes or legends into a uniform grid structure.
**Default value:** ``"full"``
center : bool, dict, :class:`RowColboolean`
Boolean flag indicating if subviews should be centered relative to their respective
rows or columns.
An object value of the form ``{"row": boolean, "column": boolean}`` can be used to
supply different centering values for rows and columns.
**Default value:** ``false``
config : dict, :class:`Config`
Vega-Lite configuration object. This property can only be defined at the top-level
of a specification.
datasets : dict, :class:`Datasets`
A global data store for named datasets. This is a mapping from names to inline
datasets. This can be an array of objects or primitive values or a string. Arrays of
primitive values are ingested as objects with a ``data`` property.
description : str
Description of this mark for commenting purpose.
encoding : dict, :class:`FacetedEncoding`
A key-value mapping between encoding channels and definition of fields.
height : dict, float, :class:`Step`, Literal['container']
The height of a visualization.
* For a plot with a continuous y-field, height should be a number.
* For a plot with either a discrete y-field or no y-field, height can be either a
number indicating a fixed height or an object in the form of ``{step: number}``
defining the height per discrete step. (No y-field is equivalent to having one
discrete step.)
* To enable responsive sizing on height, it should be set to ``"container"``.
**Default value:** Based on ``config.view.continuousHeight`` for a plot with a
continuous y-field and ``config.view.discreteHeight`` otherwise.
**Note:** For plots with `row and column channels
<https://vega.github.io/vega-lite/docs/encoding.html#facet>`__, this represents the
height of a single view and the ``"container"`` option cannot be used.
**See also:** `height <https://vega.github.io/vega-lite/docs/size.html>`__
documentation.
name : str
Name of the visualization for later reference.
padding : dict, float, :class:`ExprRef`, :class:`Padding`
The default visualization padding, in pixels, from the edge of the visualization
canvas to the data rectangle. If a number, specifies padding for all sides. If an
object, the value should have the format ``{"left": 5, "top": 5, "right": 5,
"bottom": 5}`` to specify padding for each side of the visualization.
**Default value**: ``5``
params : Sequence[dict, :class:`TopLevelParameter`, :class:`VariableParameter`, :class:`TopLevelSelectionParameter`]
An array of parameters that may either be simple variables, or more complex
selections that map user input to data queries.
projection : dict, :class:`Projection`
An object defining properties of geographic projection, which will be applied to
``shape`` path for ``"geoshape"`` marks and to ``latitude`` and ``"longitude"``
channels for other marks.
resolve : dict, :class:`Resolve`
Scale, axis, and legend resolutions for view composition specifications.
spacing : dict, float, :class:`RowColnumber`
The spacing in pixels between sub-views of the composition operator. An object of
the form ``{"row": number, "column": number}`` can be used to set different spacing
values for rows and columns.
**Default value**: Depends on ``"spacing"`` property of `the view composition
configuration <https://vega.github.io/vega-lite/docs/config.html#view-config>`__
(``20`` by default)
title : str, dict, :class:`Text`, Sequence[str], :class:`TitleParams`
Title for the plot.
transform : Sequence[dict, :class:`Transform`, :class:`BinTransform`, :class:`FoldTransform`, :class:`LoessTransform`, :class:`PivotTransform`, :class:`StackTransform`, :class:`ExtentTransform`, :class:`FilterTransform`, :class:`ImputeTransform`, :class:`LookupTransform`, :class:`SampleTransform`, :class:`WindowTransform`, :class:`DensityTransform`, :class:`FlattenTransform`, :class:`QuantileTransform`, :class:`TimeUnitTransform`, :class:`AggregateTransform`, :class:`CalculateTransform`, :class:`RegressionTransform`, :class:`JoinAggregateTransform`]
An array of data transformations such as filter and new field calculation.
usermeta : dict, :class:`Dict`
Optional metadata that will be passed to Vega. This object is completely ignored by
Vega and Vega-Lite and can be used for custom metadata.
view : dict, :class:`ViewBackground`
An object defining the view background's fill and stroke.
**Default value:** none (transparent)
width : dict, float, :class:`Step`, Literal['container']
The width of a visualization.
* For a plot with a continuous x-field, width should be a number.
* For a plot with either a discrete x-field or no x-field, width can be either a
number indicating a fixed width or an object in the form of ``{step: number}``
defining the width per discrete step. (No x-field is equivalent to having one
discrete step.)
* To enable responsive sizing on width, it should be set to ``"container"``.
**Default value:** Based on ``config.view.continuousWidth`` for a plot with a
continuous x-field and ``config.view.discreteWidth`` otherwise.
**Note:** For plots with `row and column channels
<https://vega.github.io/vega-lite/docs/encoding.html#facet>`__, this represents the
width of a single view and the ``"container"`` option cannot be used.
**See also:** `width <https://vega.github.io/vega-lite/docs/size.html>`__
documentation.
$schema : str
URL to `JSON schema <http://json-schema.org/>`__ for a Vega-Lite specification.
Unless you have a reason to change this, use
``https://vega.github.io/schema/vega-lite/v6.json``. Setting the ``$schema``
property allows automatic validation and autocomplete in editors that support JSON
schema.
"""
_schema = {"$ref": "#/definitions/TopLevelUnitSpec"}
def __init__(
self,
data: Optional[SchemaBase | ChartDataType | Map | None] = Undefined,
mark: Optional[SchemaBase | Map | Mark_T | CompositeMark_T] = Undefined,
align: Optional[SchemaBase | Map | LayoutAlign_T] = Undefined,
autosize: Optional[SchemaBase | Map | AutosizeType_T] = Undefined,
background: Optional[
str | Parameter | SchemaBase | Map | ColorName_T
] = Undefined,
bounds: Optional[Literal["full", "flush"]] = Undefined,
center: Optional[bool | SchemaBase | Map] = Undefined,
config: Optional[SchemaBase | Map] = Undefined,
datasets: Optional[SchemaBase | Map] = Undefined,
description: Optional[str] = Undefined,
encoding: Optional[SchemaBase | Map] = Undefined,
height: Optional[float | SchemaBase | Literal["container"] | Map] = Undefined,
name: Optional[str] = Undefined,
padding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
params: Optional[Sequence[SchemaBase | Map]] = Undefined,
projection: Optional[SchemaBase | Map] = Undefined,
resolve: Optional[SchemaBase | Map] = Undefined,
spacing: Optional[float | SchemaBase | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
transform: Optional[Sequence[SchemaBase | Map]] = Undefined,
usermeta: Optional[SchemaBase | Map] = Undefined,
view: Optional[SchemaBase | Map] = Undefined,
width: Optional[float | SchemaBase | Literal["container"] | Map] = Undefined,
**kwds,
):
super().__init__(
data=data,
mark=mark,
align=align,
autosize=autosize,
background=background,
bounds=bounds,
center=center,
config=config,
datasets=datasets,
description=description,
encoding=encoding,
height=height,
name=name,
padding=padding,
params=params,
projection=projection,
resolve=resolve,
spacing=spacing,
title=title,
transform=transform,
usermeta=usermeta,
view=view,
width=width,
**kwds,
)
|
TopLevelUnitSpec
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/dataclass4.py
|
{
"start": 1803,
"end": 1864
}
|
class ____(DC10):
a: str = field()
b: bool = field()
|
DC11
|
python
|
numpy__numpy
|
numpy/_core/code_generators/genapi.py
|
{
"start": 13048,
"end": 13748
}
|
class ____:
def __init__(self, name, index, api_name):
self.name = name
self.index = index
self.type = 'PyBoolScalarObject'
self.api_name = api_name
def define_from_array_api_string(self):
return "#define %s ((%s *)%s[%d])" % (self.name,
self.type,
self.api_name,
self.index)
def array_api_define(self):
return f" (void *) &{self.name}"
def internal_define(self):
astr = """\
extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
"""
return astr
|
BoolValuesApi
|
python
|
jackfrued__Python-100-Days
|
Day31-35/code/example02.py
|
{
"start": 812,
"end": 4597
}
|
class ____(object):
"""人"""
def __init__(self, name, age):
self.name = name
self.age = age
# def __gt__(self, other):
# return self.name > other.name
def __str__(self):
return f'{self.name}: {self.age}'
def __repr__(self):
return self.__str__()
def select_sort(origin_items, comp=lambda x, y: x < y):
"""简单选择排序"""
items = origin_items[:]
for i in range(len(items) - 1):
min_index = i
for j in range(i + 1, len(items)):
if comp(items[j], items[min_index]):
min_index = j
items[i], items[min_index] = items[min_index], items[i]
return items
# 函数的设计要尽量做到无副作用(不影响调用者)
# 9 1 2 3 4 5 6 7 8
# 9 2 3 4 5 6 7 8 1
# *前面的参数叫位置参数,传参时只需要对号入座即可
# *后面的参数叫命名关键字参数,传参时必须给出参数名和参数值
# *args - 可变参数 - 元组
# **kwargs - 关键字参数 - 字典
def bubble_sort(origin_items, *, comp=lambda x, y: x > y):
"""冒泡排序"""
items = origin_items[:]
for i in range(1, len(items)):
swapped = False
for j in range(i - 1, len(items) - i):
if comp(items[j], items[j + 1]):
items[j], items[j + 1] = items[j + 1], items[j]
swapped = True
if swapped:
swapped = False
for j in range(len(items) - i - 1, i - 1, -1):
if comp(items[j - 1], items[j]):
items[j], items[j - 1] = items[j - 1], items[j]
swapped = True
if not swapped:
break
return items
def merge_sort(items, comp=lambda x, y: x <= y):
"""归并排序"""
if len(items) < 2:
return items[:]
mid = len(items) // 2
left = merge_sort(items[:mid], comp)
right = merge_sort(items[mid:], comp)
return merge(left, right, comp)
def merge(items1, items2, comp=lambda x, y: x <= y):
"""合并(将两个有序列表合并成一个新的有序列表)"""
items = []
index1, index2 = 0, 0
while index1 < len(items1) and index2 < len(items2):
if comp(items1[index1], items2[index2]):
items.append(items1[index1])
index1 += 1
else:
items.append(items2[index2])
index2 += 1
items += items1[index1:]
items += items2[index2:]
return items
def quick_sort(origin_items, comp=lambda x, y: x <= y):
"""快速排序"""
items = origin_items[:]
_quick_sort(items, 0, len(items) - 1, comp)
return items
def _quick_sort(items, start, end, comp):
"""递归调用划分和排序"""
if start < end:
pos = _partition(items, start, end, comp)
_quick_sort(items, start, pos - 1, comp)
_quick_sort(items, pos + 1, end, comp)
def _partition(items, start, end, comp):
"""划分"""
pivot = items[end]
i = start - 1
for j in range(start, end):
if comp(items[j], pivot):
i += 1
items[i], items[j] = items[j], items[i]
items[i + 1], items[end] = items[end], items[i + 1]
return i + 1
def main():
"""主函数"""
items = [35, 97, 12, 68, 55, 73, 81, 40]
# print(bubble_sort(items))
# print(select_sort(items))
# print(merge_sort(items))
print(quick_sort(items))
items2 = [
Person('Wang', 25), Person('Luo', 39),
Person('Zhang', 50), Person('He', 20)
]
# print(bubble_sort(items2, comp=lambda p1, p2: p1.age > p2.age))
# print(select_sort(items2, comp=lambda p1, p2: p1.name < p2.name))
# print(merge_sort(items2, comp=lambda p1, p2: p1.age <= p2.age))
print(quick_sort(items2, comp=lambda p1, p2: p1.age <= p2.age))
items3 = ['apple', 'orange', 'watermelon', 'durian', 'pear']
# print(bubble_sort(items3))
# print(bubble_sort(items3, comp=lambda x, y: len(x) > len(y)))
# print(merge_sort(items3))
print(merge_sort(items3))
if __name__ == '__main__':
main()
|
Person
|
python
|
huggingface__transformers
|
src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py
|
{
"start": 20985,
"end": 22650
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([XLMRobertaXLLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(
hidden_states,
attention_mask,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
# Extra layernorm at the end (causes high fluctuations between different attentions)
hidden_states = self.LayerNorm(hidden_states)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
)
@auto_docstring
|
XLMRobertaXLEncoder
|
python
|
miyuchina__mistletoe
|
mistletoe/span_token.py
|
{
"start": 5591,
"end": 6504
}
|
class ____(SpanToken):
"""
Autolink token. ("<http://www.google.com>")
This is an inline token with a single child of type RawText.
Attributes:
children (list): a single RawText node for the link target.
target (str): link target.
mailto (bool): true iff the target looks like an email address, but does not have the "mailto:" prefix.
"""
repr_attributes = ("target", "mailto")
pattern = re.compile(r"(?<!\\)(?:\\\\)*<([A-Za-z][A-Za-z0-9+.-]{1,31}:[^ <>]*?|[A-Za-z0-9.!#$%&'*+/=?^_`{|}~-]+@[A-Za-z0-9](?:[A-Za-z0-9-]{0,61}[A-Za-z0-9])?(?:\.[A-Za-z0-9](?:[A-Za-z0-9-]{0,61}[A-Za-z0-9])?)*)>")
parse_inner = False
def __init__(self, match):
content = match.group(self.parse_group)
self.children = (RawText(content),)
self.target = content
self.mailto = '@' in self.target and 'mailto' not in self.target.casefold()
|
AutoLink
|
python
|
openai__openai-python
|
src/openai/types/beta/threads/file_path_annotation.py
|
{
"start": 303,
"end": 552
}
|
class ____(BaseModel):
end_index: int
file_path: FilePath
start_index: int
text: str
"""The text in the message content that needs to be replaced."""
type: Literal["file_path"]
"""Always `file_path`."""
|
FilePathAnnotation
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 865820,
"end": 867601
}
|
class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"content",
"created_at",
"creator",
"database_id",
"field_values",
"is_archived",
"project",
"title",
"type",
"updated_at",
)
content = sgqlc.types.Field("ProjectNextItemContent", graphql_name="content")
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
creator = sgqlc.types.Field(Actor, graphql_name="creator")
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
field_values = sgqlc.types.Field(
sgqlc.types.non_null(ProjectNextItemFieldValueConnection),
graphql_name="fieldValues",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
is_archived = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isArchived"
)
project = sgqlc.types.Field(
sgqlc.types.non_null(ProjectNext), graphql_name="project"
)
title = sgqlc.types.Field(String, graphql_name="title")
type = sgqlc.types.Field(sgqlc.types.non_null(ProjectItemType), graphql_name="type")
updated_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="updatedAt"
)
|
ProjectNextItem
|
python
|
python-pillow__Pillow
|
Tests/test_imagewin.py
|
{
"start": 620,
"end": 2728
}
|
class ____:
def test_dib_image(self) -> None:
# Arrange
im = hopper()
# Act
dib = ImageWin.Dib(im)
# Assert
assert dib.size == im.size
def test_dib_mode_string(self) -> None:
# Arrange
mode = "RGBA"
size = (128, 128)
# Act
dib = ImageWin.Dib(mode, size)
# Assert
assert dib.size == (128, 128)
with pytest.raises(ValueError):
ImageWin.Dib(mode)
def test_dib_hwnd(self) -> None:
mode = "RGBA"
size = (128, 128)
wnd = 0
dib = ImageWin.Dib(mode, size)
hwnd = ImageWin.HWND(wnd)
dib.expose(hwnd)
dib.draw(hwnd, (0, 0) + size)
assert isinstance(dib.query_palette(hwnd), int)
def test_dib_paste(self) -> None:
# Arrange
im = hopper()
mode = "RGBA"
size = (128, 128)
dib = ImageWin.Dib(mode, size)
# Act
dib.paste(im)
# Assert
assert dib.size == (128, 128)
def test_dib_paste_bbox(self) -> None:
# Arrange
im = hopper()
bbox = (0, 0, 10, 10)
mode = "RGBA"
size = (128, 128)
dib = ImageWin.Dib(mode, size)
# Act
dib.paste(im, bbox)
# Assert
assert dib.size == (128, 128)
def test_dib_frombytes_tobytes_roundtrip(self) -> None:
# Arrange
# Make two different DIB images
im = hopper()
dib1 = ImageWin.Dib(im)
mode = "RGB"
size = (128, 128)
dib2 = ImageWin.Dib(mode, size)
# Confirm they're different
assert dib1.tobytes() != dib2.tobytes()
# Act
# Make one the same as the using tobytes()/frombytes()
test_buffer = dib1.tobytes()
for datatype in ("bytes", "memoryview"):
if datatype == "memoryview":
test_buffer = memoryview(test_buffer)
dib2.frombytes(test_buffer)
# Assert
# Confirm they're the same
assert dib1.tobytes() == dib2.tobytes()
|
TestImageWinDib
|
python
|
pytorch__pytorch
|
test/test_serialization.py
|
{
"start": 40010,
"end": 40301
}
|
class ____:
def __init__(self, num):
self.num = num
def __reduce_ex__(self, proto):
# Third item, state here will cause pickle to push a BUILD instruction
return ClassThatUsesBuildInstruction, (self.num,), {'foo': 'bar'}
@dataclass
|
ClassThatUsesBuildInstruction
|
python
|
aio-libs__aiohttp
|
aiohttp/http_parser.py
|
{
"start": 2566,
"end": 2705
}
|
class ____(IntEnum):
PARSE_CHUNKED_SIZE = 0
PARSE_CHUNKED_CHUNK = 1
PARSE_CHUNKED_CHUNK_EOF = 2
PARSE_TRAILERS = 4
|
ChunkState
|
python
|
bokeh__bokeh
|
tests/unit/bokeh/command/test_subcommand.py
|
{
"start": 1074,
"end": 2552
}
|
class ____(sc.Subcommand):
def invoke(self, args): pass
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def test_is_abstract() -> None:
with pytest.raises(TypeError):
_Bad()
def test_missing_args() -> None:
p = MagicMock()
_Good(p)
p.add_argument.assert_not_called()
def test_no_args() -> None:
_Good.args = ()
p = MagicMock()
_Good(p)
p.add_argument.assert_not_called()
def test_one_arg() -> None:
_Good.args = (
("foo", sc.Argument(nargs=1, help="foo")),
)
p = MagicMock()
_Good(p)
assert p.add_argument.call_count == 1
def test_args() -> None:
_Good.args = (
("foo", sc.Argument(nargs=1, help="foo")),
("bar", sc.Argument(nargs=2, help="bar")),
)
p = MagicMock()
_Good(p)
assert p.add_argument.call_count == 2
def test_base_invoke() -> None:
with pytest.raises(NotImplementedError):
p = MagicMock()
obj = _Good(p)
super(_Good, obj).invoke("foo") # note super special case
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
_Good
|
python
|
huggingface__transformers
|
tests/pipelines/test_pipelines_audio_classification.py
|
{
"start": 1175,
"end": 8569
}
|
class ____(unittest.TestCase):
model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_dataset = None
@classmethod
def _load_dataset(cls):
# Lazy loading of the dataset. Because it is a class method, it will only be loaded once per pytest process.
if cls._dataset is None:
cls._dataset = datasets.load_dataset(
"hf-internal-testing/librispeech_asr_dummy", "clean", split="validation"
)
def get_test_pipeline(
self,
model,
tokenizer=None,
image_processor=None,
feature_extractor=None,
processor=None,
dtype="float32",
):
audio_classifier = AudioClassificationPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
image_processor=image_processor,
processor=processor,
dtype=dtype,
)
# test with a raw waveform
audio = np.zeros((34000,))
audio2 = np.zeros((14000,))
return audio_classifier, [audio2, audio]
def run_pipeline_test(self, audio_classifier, examples):
audio2, audio = examples
output = audio_classifier(audio)
# by default a model is initialized with num_labels=2
self.assertEqual(
output,
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
)
output = audio_classifier(audio, top_k=1)
self.assertEqual(
output,
[
{"score": ANY(float), "label": ANY(str)},
],
)
self.run_torchaudio(audio_classifier)
for single_output in output:
compare_pipeline_output_to_hub_spec(single_output, AudioClassificationOutputElement)
@require_torchaudio
def run_torchaudio(self, audio_classifier):
self._load_dataset()
# test with a local file
audio = self._dataset[0]["audio"]["array"]
output = audio_classifier(audio)
self.assertEqual(
output,
[
{"score": ANY(float), "label": ANY(str)},
{"score": ANY(float), "label": ANY(str)},
],
)
@require_torch
def test_small_model_pt(self):
model = "anton-l/wav2vec2-random-tiny-classifier"
audio_classifier = pipeline("audio-classification", model=model)
audio = np.ones((8000,))
output = audio_classifier(audio, top_k=4)
EXPECTED_OUTPUT = [
{"score": 0.0842, "label": "no"},
{"score": 0.0838, "label": "up"},
{"score": 0.0837, "label": "go"},
{"score": 0.0834, "label": "right"},
]
EXPECTED_OUTPUT_PT_2 = [
{"score": 0.0845, "label": "stop"},
{"score": 0.0844, "label": "on"},
{"score": 0.0841, "label": "right"},
{"score": 0.0834, "label": "left"},
]
self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
output = audio_classifier(audio_dict, top_k=4)
self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
@require_torch
def test_small_model_pt_fp16(self):
model = "anton-l/wav2vec2-random-tiny-classifier"
audio_classifier = pipeline("audio-classification", model=model, dtype=torch.float16)
audio = np.ones((8000,))
output = audio_classifier(audio, top_k=4)
# Expected outputs are collected running the test on torch 2.6 in few scenarios.
# Running on CUDA T4/A100 and on XPU PVC (note: using stock torch xpu, NOT using IPEX):
EXPECTED_OUTPUT = [
{"score": 0.0833, "label": "go"},
{"score": 0.0833, "label": "off"},
{"score": 0.0833, "label": "stop"},
{"score": 0.0833, "label": "on"},
]
# Running on CPU:
EXPECTED_OUTPUT_PT_2 = [
{"score": 0.0839, "label": "no"},
{"score": 0.0837, "label": "go"},
{"score": 0.0836, "label": "yes"},
{"score": 0.0835, "label": "right"},
]
self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
output = audio_classifier(audio_dict, top_k=4)
self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
@require_torch
@slow
def test_large_model_pt(self):
model = "superb/wav2vec2-base-superb-ks"
audio_classifier = pipeline("audio-classification", model=model)
dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")
audio = np.array(dataset[3]["speech"], dtype=np.float32)
output = audio_classifier(audio, top_k=4)
self.assertEqual(
nested_simplify(output, decimals=3),
[
{"score": 0.981, "label": "go"},
{"score": 0.007, "label": "up"},
{"score": 0.006, "label": "_unknown_"},
{"score": 0.001, "label": "down"},
],
)
@require_torch
@slow
def test_top_k_none_returns_all_labels(self):
model_name = "superb/wav2vec2-base-superb-ks" # model with more than 5 labels
classification_pipeline = pipeline(
"audio-classification",
model=model_name,
top_k=None,
)
# Create dummy input
sampling_rate = 16000
signal = np.zeros((sampling_rate,), dtype=np.float32)
result = classification_pipeline(signal)
num_labels = classification_pipeline.model.config.num_labels
self.assertEqual(len(result), num_labels, "Should return all labels when top_k is None")
@require_torch
@slow
def test_top_k_none_with_few_labels(self):
model_name = "superb/hubert-base-superb-er" # model with fewer labels
classification_pipeline = pipeline(
"audio-classification",
model=model_name,
top_k=None,
)
# Create dummy input
sampling_rate = 16000
signal = np.zeros((sampling_rate,), dtype=np.float32)
result = classification_pipeline(signal)
num_labels = classification_pipeline.model.config.num_labels
self.assertEqual(len(result), num_labels, "Should handle models with fewer labels correctly")
@require_torch
@slow
def test_top_k_greater_than_labels(self):
model_name = "superb/hubert-base-superb-er"
classification_pipeline = pipeline(
"audio-classification",
model=model_name,
top_k=100, # intentionally large number
)
# Create dummy input
sampling_rate = 16000
signal = np.zeros((sampling_rate,), dtype=np.float32)
result = classification_pipeline(signal)
num_labels = classification_pipeline.model.config.num_labels
self.assertEqual(len(result), num_labels, "Should cap top_k to number of labels")
|
AudioClassificationPipelineTests
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/links/translate.py
|
{
"start": 5206,
"end": 5532
}
|
class ____(BaseGoogleLink):
"""
Helper class for constructing Translation Datasets List link.
Both legacy and native datasets are available under this link.
"""
name = "Translation Dataset List"
key = "translation_dataset_list"
format_str = TRANSLATION_DATASET_LIST_LINK
|
TranslationDatasetsListLink
|
python
|
pyca__cryptography
|
tests/hazmat/primitives/test_hashes.py
|
{
"start": 3671,
"end": 4394
}
|
class ____:
test_blake2s = generate_base_hash_test(
hashes.BLAKE2s(digest_size=32),
digest_size=32,
)
def test_invalid_digest_size(self, backend):
with pytest.raises(ValueError):
hashes.BLAKE2s(digest_size=33)
with pytest.raises(ValueError):
hashes.BLAKE2s(digest_size=0)
with pytest.raises(ValueError):
hashes.BLAKE2s(digest_size=-1)
def test_buffer_protocol_hash(backend):
data = binascii.unhexlify(b"b4190e")
h = hashes.Hash(hashes.SHA256(), backend)
h.update(bytearray(data))
assert h.finalize() == binascii.unhexlify(
b"dff2e73091f6c05e528896c4c831b9448653dc2ff043528f6769437bc7b975c2"
)
|
TestBLAKE2s
|
python
|
milvus-io__pymilvus
|
pymilvus/client/types.py
|
{
"start": 25990,
"end": 28134
}
|
class ____:
"""
Represents information about a node in the system.
Attributes:
node_id (int): The ID of the node.
address (str): The ip address of the node.
hostname (str): The hostname of the node.
Example:
NodeInfo(
node_id=1,
address="127.0.0.1",
hostname="localhost",
)
"""
def __init__(self, info: Any) -> None:
self._node_id = info.node_id
self._address = info.address
self._hostname = info.hostname
def __repr__(self) -> str:
return f"""NodeInfo:
<node_id:{self.node_id}>,
<address:{self.address}>,
<hostname:{self.hostname}>"""
@property
def node_id(self) -> int:
return self._node_id
@property
def address(self) -> str:
return self._address
@property
def hostname(self) -> str:
return self._hostname
ResourceGroupConfig = rg_pb2.ResourceGroupConfig
"""
Represents the configuration of a resource group.
Attributes:
requests (ResourceGroupLimit): The requests of the resource group.
limits (ResourceGroupLimit): The limits of the resource group.
transfer_from (List[ResourceGroupTransfer]): The transfer config that resource group
can transfer node from the resource group of this field at high priority.
transfer_to (List[ResourceGroupTransfer]): The transfer config that resource group
can transfer node to the resource group of this field at high priority.
Example:
ResourceGroupConfig(
requests={"node_num": 1},
limits={"node_num": 5},
transfer_from=[{"resource_group": "__default_resource_group"}],
transfer_to=[{"resource_group": "resource_group_2"}],
)
"""
ResourceGroupLimit = rg_pb2.ResourceGroupLimit
"""
Represents the limit of a resource group.
Attributes:
node_num (int): The number of nodes that the resource group can hold.
"""
ResourceGroupTransfer = rg_pb2.ResourceGroupTransfer
"""
Represents the transfer config of a resource group.
Attributes:
resource_group (str): The name of the resource group that can be transferred to or from.
"""
|
NodeInfo
|
python
|
pytorch__pytorch
|
test/inductor/test_flex_attention.py
|
{
"start": 154110,
"end": 181635
}
|
class ____(torch.nn.Module):
def forward(self, primals_1: "f64[2, 2, 128, 4]", primals_2: "f64[2, 2, 128, 4]", primals_3: "f64[2, 2, 128, 4]", full: "i32[1, 1, 1]", full_default: "i32[1, 1, 1, 1]", convert_element_type: "i32[1, 1, 1]", convert_element_type_1: "i32[1, 1, 1, 1]", getitem_2: "f64[2, 2, 128, 4]", getitem_3: "f32[2, 2, 128]", tangents_1: "f64[2, 2, 128, 4]"):
full_default_4: "f32[2, 2, 128]" = torch.ops.aten.full.default([2, 2, 128], 0, dtype = torch.float32, layout = torch.strided, device = device(type='cuda', index=0), pin_memory = False)
fw_graph0 = self.fw_graph0
joint_graph0 = self.joint_graph0
mask_graph0 = self.mask_graph0
flex_attention_backward = torch.ops.higher_order.flex_attention_backward(primals_1, primals_2, primals_3, getitem_2, getitem_3, tangents_1, full_default_4, fw_graph0, joint_graph0, (1, 1, full, full_default, None, None, convert_element_type, convert_element_type_1, None, None, 1073741824, 1073741824, mask_graph0), 0.5, {'BACKEND': 'AUTO', 'PRESCALE_QK': False, 'ROWS_GUARANTEED_SAFE': False, 'BLOCKS_ARE_CONTIGUOUS': False, 'WRITE_DQ': True, 'OUTPUT_LOGSUMEXP': True, 'OUTPUT_MAX': False}, (), ()); primals_1 = primals_2 = primals_3 = getitem_2 = getitem_3 = tangents_1 = full_default_4 = fw_graph0 = joint_graph0 = full = full_default = convert_element_type = convert_element_type_1 = mask_graph0 = None
getitem_5: "f64[2, 2, 128, 4]" = flex_attention_backward[0]
getitem_6: "f64[2, 2, 128, 4]" = flex_attention_backward[1]
getitem_7: "f64[2, 2, 128, 4]" = flex_attention_backward[2]; flex_attention_backward = None
return (getitem_5, getitem_6, getitem_7)
class fw_graph0(torch.nn.Module):
def forward(self, arg0_1: "f64[]", arg1_1: "i32[]", arg2_1: "i32[]", arg3_1: "i32[]", arg4_1: "i32[]"):
mul: "f64[]" = torch.ops.aten.mul.Tensor(arg0_1, arg0_1); arg0_1 = None
return mul
class joint_graph0(torch.nn.Module):
def forward(self, arg0_1: "f64[]", arg1_1: "i32[]", arg2_1: "i32[]", arg3_1: "i32[]", arg4_1: "i32[]", arg5_1: "f64[]"):
mul_1: "f64[]" = torch.ops.aten.mul.Tensor(arg5_1, arg0_1)
mul_2: "f64[]" = torch.ops.aten.mul.Tensor(arg5_1, arg0_1); arg5_1 = arg0_1 = None
add: "f64[]" = torch.ops.aten.add.Tensor(mul_2, mul_1); mul_2 = mul_1 = None
return [add, None, None, None, None]
class mask_graph0(torch.nn.Module):
def forward(self, arg0_1: "i32[]", arg1_1: "i32[]", arg2_1: "i32[]", arg3_1: "i32[]"):
full_default: "b8[]" = torch.ops.aten.full.default([], True, dtype = torch.bool, layout = torch.strided, device = device(type='cuda', index=0), pin_memory = False)
return full_default
""".replace( # noqa: B950
"GPU_TYPE", torch.device(device).type
),
)
@supported_platform
def test_tensor_subclass_dispatch_order(self, device):
"""Test that tensor subclasses get proper dispatch priority over modes.
This test verifies the fix that allows tensor subclasses' pyimpl to run before
FakeTensorMode/FunctionalTensorMode implementations, preventing issues
where subclasses that error on as_strided would fail in flex_attention.
"""
import torch.utils._pytree as pytree
from torch.utils._python_dispatch import return_and_correct_aliasing
class AsStridedErrorTensor(torch.Tensor):
@staticmethod
def __new__(cls, elem):
assert isinstance(elem, torch.Tensor)
return torch.Tensor._make_wrapper_subclass(
cls,
elem.shape,
strides=elem.stride(),
storage_offset=elem.storage_offset(),
dtype=elem.dtype,
layout=elem.layout,
device=elem.device,
requires_grad=elem.requires_grad,
)
def __init__(self, elem):
self.elem = elem
def __repr__(self):
return f"AsStridedErrorTensor({self.elem})"
def __tensor_flatten__(self):
return ["elem"], None
@staticmethod
def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride):
assert meta is None
elem = inner_tensors["elem"]
return AsStridedErrorTensor(elem)
@classmethod
def __torch_dispatch__(cls, func, types, args, kwargs=None):
# Error if as_strided is called
if func is torch.ops.aten.as_strided.default:
raise RuntimeError("as_strided was called on AsStridedErrorTensor!")
if kwargs is None:
kwargs = {}
args_elem = pytree.tree_map_only(
AsStridedErrorTensor, lambda x: x.elem, args
)
kwargs_elem = pytree.tree_map_only(
AsStridedErrorTensor, lambda x: x.elem, kwargs
)
out = func(*args_elem, **kwargs_elem)
def wrap_output(x):
if isinstance(x, torch.Tensor):
return AsStridedErrorTensor(x)
return x
out_wrapped = pytree.tree_map(wrap_output, out)
return return_and_correct_aliasing(func, args, kwargs, out_wrapped)
from torch._higher_order_ops.flex_attention import (
flex_attention as flex_attention_hop,
)
@flex_attention_hop.py_impl(AsStridedErrorTensor)
def flex_attention_as_strided_error_tensor(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
score_mod,
block_mask,
scale,
kernel_options,
score_mod_other_buffers=(),
mask_mod_other_buffers=(),
):
inner_q, inner_k, inner_v = query.elem, key.elem, value.elem
out, lse, max_scores = flex_attention_hop(
inner_q,
inner_k,
inner_v,
score_mod,
block_mask,
scale,
kernel_options,
score_mod_other_buffers,
mask_mod_other_buffers,
)
return (
AsStridedErrorTensor(out),
AsStridedErrorTensor(lse),
AsStridedErrorTensor(max_scores),
)
# Test setup
B, H, S, D = 2, 1, 128, 16
dtype = torch.float32
# Create regular tensors
query_elem = torch.randn(B, H, S, D, device=device, dtype=dtype)
key_elem = torch.randn(B, H, S, D, device=device, dtype=dtype)
value_elem = torch.randn(B, H, S, D, device=device, dtype=dtype)
# Test 1: Verify as_strided raises error when called directly on AsStridedErrorTensor
test_tensor = AsStridedErrorTensor(query_elem)
with self.assertRaisesRegex(
RuntimeError, "as_strided was called on AsStridedErrorTensor!"
):
torch.as_strided(
test_tensor, size=(B, H, S, D), stride=test_tensor.stride()
)
# Test 2: Run flex_attention with normal tensors first
compiled_fn = torch.compile(flex_attention, backend="aot_eager")
normal_out, normal_lse = compiled_fn(
query_elem, key_elem, value_elem, return_lse=True
)
# Test 3: Wrap in our subclass
query = AsStridedErrorTensor(query_elem)
key = AsStridedErrorTensor(key_elem)
value = AsStridedErrorTensor(value_elem)
# This should NOT error with as_strided after the fix
# Before the fix, it would error because FakeTensorMode would directly
# call flex_attention_fake_impl which uses as_strided
out, lse = compiled_fn(query, key, value, return_lse=True)
# Verify we got valid output
self.assertIsInstance(out, AsStridedErrorTensor)
self.assertIsInstance(lse, AsStridedErrorTensor)
self.assertEqual(out.shape, (B, H, S, D))
self.assertEqual(lse.shape, (B, H, S))
# Test 4: Compare outputs between normal tensors and subclassed tensors
torch.testing.assert_close(out.elem, normal_out, rtol=1e-5, atol=1e-5)
torch.testing.assert_close(lse.elem, normal_lse, rtol=1e-5, atol=1e-5)
@supported_platform
@skip_on_cuda
def test_cpu_error_message_return_lse(self, device):
make_tensor = functools.partial(
torch.randn,
(2, 2, 128, 16),
device="cpu",
dtype=torch.float32,
requires_grad=False,
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
attention = torch.compile(flex_attention)
with self.assertRaisesRegex(
torch._inductor.exc.InductorError,
r"NotImplementedError: torch.compile on CPU only supports inference and `return_lse` is not supported yet.",
):
attention(query, key, value, return_lse=True)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_device_cuda_1(self, device):
class TestModule(torch.nn.Module):
def forward(self, q, k, v, block_mask):
return flex_attention(q, k, v, block_mask=block_mask)
q = torch.randn(1, 1, 256, 32, device="cuda:1", dtype=torch.bfloat16)
k = torch.randn(1, 1, 256, 32, device="cuda:1", dtype=torch.bfloat16)
v = torch.randn(1, 1, 256, 32, device="cuda:1", dtype=torch.bfloat16)
mask = create_block_mask(
lambda b, h, q_idx, kv_idx: q_idx >= kv_idx,
B=None,
H=None,
Q_LEN=256,
KV_LEN=256,
device="cuda:1",
)
mod = torch.compile(TestModule())
attn_output = mod(q, k, v, mask)
self.assertEqual(attn_output.device, torch.device("cuda:1"))
@supported_platform
@skip_on_cpu
def test_custom_score_mod_layout_freeze(self, device):
torch.manual_seed(0)
class FlexAttentionCPB(nn.Module):
def __init__(self, N: int, R: int, H: int = 4, hidden: int = 32):
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(2, hidden),
nn.GELU(),
nn.Linear(hidden, H, bias=False),
)
self.gamma = nn.Parameter(torch.zeros(H))
self.H = H
self._init_tables(N, R)
self.register_buffer(
"r_cutoff", torch.tensor(R, dtype=torch.long), persistent=False
)
def _init_tables(self, N: int, R: int) -> None:
P = N - R
S = int(P**0.5)
assert S * S == P
rng = torch.arange(-(S - 1), S, dtype=torch.float32)
dY, dX = torch.meshgrid(rng, rng, indexing="ij")
rel = torch.stack(
[dY / max(S - 1, 1), dX / max(S - 1, 1)], dim=-1
).reshape(-1, 2)
rel_table = torch.sign(rel) * torch.log1p(rel.abs())
self.register_buffer("rel_table", rel_table, persistent=False)
yy, xx = torch.arange(S), torch.arange(S)
Y, X = torch.meshgrid(yy, xx, indexing="ij")
flat = torch.stack([Y, X], 0).flatten(1)
d = flat[:, :, None] - flat[:, None, :]
d = d.permute(1, 2, 0).contiguous()
d[:, :, 0] += S - 1
d[:, :, 1] += S - 1
d[:, :, 0] *= 2 * S - 1
l_idx = d.sum(-1).to(torch.long)
idx = torch.full((N, N), 0, dtype=torch.long)
idx[R:, R:] = l_idx
self.register_buffer("idx_table", idx, persistent=False)
def _score_mod(self, mu: torch.Tensor):
bt = self.mlp(self.rel_table)
idx = self.idx_table
mu_q, mu_k = mu.unbind(2)
gam_sig = torch.sigmoid(self.gamma)
def score_mod(score, b, h, q, kv):
has_bias = (q >= self.r_cutoff) & (kv >= self.r_cutoff)
l2 = idx[q, kv]
bias = bt[l2, h]
w_gate = gam_sig[h] * (mu_q[b, h, q] + mu_k[b, h, kv])
return score + has_bias.to(score.dtype) * w_gate * bias
return score_mod
def forward(self, q, k, v, mu):
return flex_attention(q, k, v, score_mod=self._score_mod(mu))
dtype = torch.bfloat16 if PLATFORM_SUPPORTS_BF16 else torch.float16
device_obj = torch.device(device)
module = FlexAttentionCPB(N=18, R=2).to(device_obj)
compiled_module = torch.compile(module, backend="inductor", dynamic=False)
q = torch.randn(2, 4, 18, 32, device=device_obj, dtype=dtype)
k = torch.randn_like(q)
v = torch.randn_like(q)
mu = torch.randn(2, 4, 2, 18, device=device_obj)
with torch.no_grad():
with torch.nn.attention.sdpa_kernel(SDPBackend.FLASH_ATTENTION):
eager_out = module(q, k, v, mu)
compiled_out = compiled_module(q, k, v, mu)
self.assertEqual(compiled_out.shape, eager_out.shape)
torch.testing.assert_close(
compiled_out.float(), eager_out.float(), atol=2e-2, rtol=2e-2
)
@supported_platform
@skip_on_cpu
@common_utils.parametrize(
"ops_to_save",
[
[
torch.ops.aten.mm.default,
],
[
flex_attention_hop,
],
[torch.ops.aten.mm.default, flex_attention_hop],
],
)
def test_selective_ac(self, device, ops_to_save):
class FlexAttentionModule(nn.Module):
def __init__(self, hidden_size, num_heads):
super().__init__()
self.hidden_size = hidden_size
self.num_heads = num_heads
self.head_dim = hidden_size // num_heads
# In-projections (query, key, value)
self.q_proj = nn.Linear(hidden_size, hidden_size)
self.k_proj = nn.Linear(hidden_size, hidden_size)
self.v_proj = nn.Linear(hidden_size, hidden_size)
# Out-projection
self.out_proj = nn.Linear(hidden_size, hidden_size)
def forward(self, x):
batch_size, seq_len, _ = x.size()
# Project queries, keys, and values
q = (
self.q_proj(x)
.view(batch_size, seq_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
k = (
self.k_proj(x)
.view(batch_size, seq_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
v = (
self.v_proj(x)
.view(batch_size, seq_len, self.num_heads, self.head_dim)
.transpose(1, 2)
)
# Apply flex attention
attn_output = flex_attention(
q,
k,
v,
)
# Reshape output
attn_output = (
attn_output.transpose(1, 2)
.contiguous()
.view(batch_size, seq_len, self.hidden_size)
)
# Out projection
output = self.out_proj(attn_output)
return output
from torch.utils.checkpoint import (
checkpoint,
create_selective_checkpoint_contexts,
)
context_fn = functools.partial(
create_selective_checkpoint_contexts, ops_to_save
)
# Define a model that uses FlexAttention with selective activation checkpointing
class SacModule(nn.Module):
def __init__(self, hidden_size, num_heads, context_fn):
super().__init__()
self.flex_attn = FlexAttentionModule(hidden_size, num_heads)
self.context_fn = context_fn
def forward(self, x):
def flex_attn_fn(x):
return self.flex_attn(x)
output = checkpoint(
flex_attn_fn,
x,
use_reentrant=False,
context_fn=self.context_fn,
)
return output
flex_module = SacModule(hidden_size=512, num_heads=8, context_fn=context_fn).to(
device, dtype=torch.bfloat16
)
x = torch.ones(8, 1024, 512, device=device, dtype=torch.bfloat16)
# Run without compilation
output_module = flex_module(x)
compiled_module = torch.compile(flex_module)
output_compiled = compiled_module(x)
torch.testing.assert_close(output_module, output_compiled, rtol=1e-2, atol=1e-2)
# Calculate gradients and compare them
x.requires_grad_(True)
output_module = flex_module(x)
output_compiled = compiled_module(x)
grad_output = torch.ones_like(output_module)
grad_module = torch.autograd.grad(
outputs=output_module, inputs=x, grad_outputs=grad_output, retain_graph=True
)[0]
grad_compiled = torch.autograd.grad(
outputs=output_compiled, inputs=x, grad_outputs=grad_output
)[0]
torch.testing.assert_close(grad_module, grad_compiled, rtol=1e-2, atol=1e-2)
@supported_platform
@skip_on_cpu
def test_selective_ac_with_max_autotune_short_query(self, device):
from functools import partial
from torch.utils.checkpoint import (
checkpoint,
CheckpointPolicy,
create_selective_checkpoint_contexts,
)
compute_intensive_ops = [
torch.ops.aten.mm,
torch.ops.aten.bmm,
]
def policy_fn(ctx, op, *args, **kwargs):
if op in compute_intensive_ops:
return CheckpointPolicy.MUST_SAVE
else:
return CheckpointPolicy.PREFER_RECOMPUTE
def causal_mask(b, h, q_idx, kv_idx):
return q_idx >= kv_idx
class DummyAttentionModule(nn.Module):
def __init__(self, dim=64, num_heads=4):
super().__init__()
self.dim = dim
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.q_proj = nn.Linear(dim, dim)
self.k_proj = nn.Linear(dim, dim)
self.v_proj = nn.Linear(dim, dim)
self.out_proj = nn.Linear(dim, dim)
self._activation_checkpoint_context_fn = partial(
create_selective_checkpoint_contexts, policy_fn
)
self._flex_attention = torch.compile(
partial(
checkpoint,
flex_attention,
use_reentrant=False,
context_fn=self._activation_checkpoint_context_fn,
),
mode="max-autotune-no-cudagraphs",
)
def forward(self, x, block_mask):
batch_size, seq_len, _ = x.shape
q = self.q_proj(x)
k = self.k_proj(x)
v = self.v_proj(x)
q = q.view(
batch_size, seq_len, self.num_heads, self.head_dim
).transpose(1, 2)
k = k.view(
batch_size, seq_len, self.num_heads, self.head_dim
).transpose(1, 2)
v = v.view(
batch_size, seq_len, self.num_heads, self.head_dim
).transpose(1, 2)
attn_out = self._flex_attention(q, k, v, block_mask=block_mask)
attn_out = (
attn_out.transpose(1, 2)
.contiguous()
.view(batch_size, seq_len, self.dim)
)
out = self.out_proj(attn_out)
return out
batch_size = 2
seq_len = 64
dim = 64
num_heads = 4
model = DummyAttentionModule(dim=dim, num_heads=num_heads).to(device)
x = torch.randn(batch_size, seq_len, dim, device=device, requires_grad=True)
block_mask = create_block_mask(
causal_mask,
B=batch_size,
H=num_heads,
Q_LEN=seq_len,
KV_LEN=seq_len,
device=device,
)
out = model(x, block_mask)
loss = out.sum()
loss.backward()
self.assertIsNotNone(x.grad)
@supported_platform
@skip_on_cpu
def test_validate_small_embedding_size_error_message(self, device):
# eager support for small embedding size
q, k, v = [torch.randn(2, 2, 128, 8, device=device) for _ in range(3)]
flex_attention(q, k, v)
# compiled cpu support for small embedding size
q, k, v = [torch.randn(2, 2, 128, 8, device=device) for _ in range(3)]
flex_attention(q, k, v)
# compiled gpu kernel does not support small embedding size
q, k, v = [torch.randn(2, 2, 128, 8, device=device) for _ in range(3)]
compiled_fa = torch.compile(flex_attention)
with self.assertRaisesRegex(
torch._inductor.exc.InductorError,
"NYI: embedding dimension of the query, key, and value must be "
"at least 16 but got E=8 and Ev=8",
):
compiled_fa(q, k, v)
# compiled gpu kernel supports large embedding size
q, k, v = [torch.randn(2, 2, 128, 16, device=device) for _ in range(3)]
compiled_fa = torch.compile(flex_attention)
@unittest.skipIf(
not has_triton() or not HAS_WARP_SPEC,
reason="FBCODE Triton is required for this test",
)
def test_triton_template_warp_specialization(self, device):
def make_tensor():
return torch.rand(4, 16, 4096, 64, device=device, dtype=torch.bfloat16)
q, k, v = make_tensor(), make_tensor(), make_tensor()
flex_compiled = torch.compile(flex_attention, fullgraph=True)
positional_args = (q, k, v)
keyword_args = {
"kernel_options": {
"num_warps": 4,
"num_consumer_groups": 2,
"num_buffers_warp_spec": 3,
}
}
# Check if kernel code contains warp specialization parameters
_, kernel_code = run_and_get_code(
flex_compiled,
*positional_args,
**keyword_args,
)
assert kernel_code is not None, "Failed to retrieve compiled kernel code"
assert "num_consumer_groups" in kernel_code[0], (
"num_consumer_groups missing in kernel definition"
)
assert "num_buffers_warp_spec" in kernel_code[0], (
"num_buffers_warp_spec missing in kernel definition"
)
# Validate correctness
C1 = flex_compiled(q, k, v)
C2 = flex_attention(q, k, v)
assert torch.allclose(C1, C2, atol=1e-2, rtol=1e-2), (
"Warp specialized kernel result differs from reference"
)
@supported_platform
@skip_on_cpu
@skipCUDAIf(not has_triton_tma_device(), "Requires TMA enabled CUDA device")
def test_tma_with_customer_kernel_options(self, device):
make_tensor = functools.partial(
torch.ones,
(1, 1, 256, 128),
device=device,
dtype=torch.bfloat16,
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
kernel_options_1 = {
"BLOCK_M": 128,
"BLOCK_N": 128,
"USE_TMA": False,
}
kernel_options_2 = {"BLOCK_M": 128, "BLOCK_N": 128, "USE_TMA": True}
flex_compile = torch.compile(flex_attention, fullgraph=True, dynamic=True)
out_compiled = flex_compile(query, key, value, kernel_options=kernel_options_1)
out_tma_compiled = flex_compile(
query, key, value, kernel_options=kernel_options_2
)
# vanilla compiled vs TMA compiled
torch.testing.assert_close(out_tma_compiled, out_compiled, atol=2e-1, rtol=2e-1)
@supported_platform
@skip_on_cpu
def test_large_batch_heads_grid_dimension(self, device):
B, H, S, D = 22720, 3, 64, 32
make_tensor = functools.partial(
torch.randn,
(B, H, S, D),
device=device,
dtype=torch.float16,
requires_grad=True,
)
query, key, value = make_tensor(), make_tensor(), make_tensor()
flex_compile = torch.compile(flex_attention, fullgraph=True, dynamic=True)
out_compiled = flex_compile(query, key, value)
self.assertEqual(out_compiled.shape, (B, H, S, D))
grad_output = torch.randn_like(out_compiled)
out_compiled.backward(grad_output)
self.assertIsNotNone(query.grad)
self.assertIsNotNone(key.grad)
self.assertIsNotNone(value.grad)
self.assertEqual(query.grad.shape, query.shape)
self.assertEqual(key.grad.shape, key.shape)
self.assertEqual(value.grad.shape, value.shape)
@supported_platform
def test_debug_flag_disables_internal_compilation(self, device):
"""Test that _FLEX_ATTENTION_DISABLE_COMPILE_DEBUG flag bypasses internal compilation."""
import torch.nn.attention.flex_attention as fa
original_flag = fa._FLEX_ATTENTION_DISABLE_COMPILE_DEBUG
original_warnings_shown = fa._WARNINGS_SHOWN.copy()
try:
B, H, S, D = 1, 1, 128, 64
query = torch.randn(B, H, S, D, device=device, dtype=torch.float32)
key = torch.randn(B, H, S, D, device=device, dtype=torch.float32)
value = torch.randn(B, H, S, D, device=device, dtype=torch.float32)
def simple_score_mod(score, b, h, q_idx, kv_idx):
return score
# Test with debug flag False - should warn
fa._FLEX_ATTENTION_DISABLE_COMPILE_DEBUG = False
fa._WARNINGS_SHOWN.clear()
with self.assertWarns(UserWarning) as cm:
out_compiled = fa.flex_attention(
query, key, value, score_mod=simple_score_mod
)
self.assertIn(
"flex_attention called without torch.compile", str(cm.warning)
)
# Test with debug flag True - should NOT warn
fa._FLEX_ATTENTION_DISABLE_COMPILE_DEBUG = True
# Should not error
with warnings.catch_warnings():
warnings.simplefilter("error")
out_debug = fa.flex_attention(
query, key, value, score_mod=simple_score_mod
)
torch.testing.assert_close(out_compiled, out_debug, rtol=1e-4, atol=1e-4)
finally:
fa._FLEX_ATTENTION_DISABLE_COMPILE_DEBUG = original_flag
fa._WARNINGS_SHOWN = original_warnings_shown
|
GraphModule
|
python
|
django__django
|
tests/queries/tests.py
|
{
"start": 77767,
"end": 79181
}
|
class ____(TestCase):
def test_ticket10028(self):
# Ordering by model related to nullable relations(!) should use outer
# joins, so that all results are included.
p1 = Plaything.objects.create(name="p1")
self.assertSequenceEqual(Plaything.objects.all(), [p1])
def test_join_already_in_query(self):
# Ordering by model related to nullable relations should not change
# the join type of already existing joins.
Plaything.objects.create(name="p1")
s = SingleObject.objects.create(name="s")
r = RelatedObject.objects.create(single=s, f=1)
p2 = Plaything.objects.create(name="p2", others=r)
qs = Plaything.objects.filter(others__isnull=False).order_by("pk")
self.assertNotIn("JOIN", str(qs.query))
qs = Plaything.objects.filter(others__f__isnull=False).order_by("pk")
self.assertIn("INNER", str(qs.query))
qs = qs.order_by("others__single__name")
# The ordering by others__single__pk will add one new join (to single)
# and that join must be LEFT join. The already existing join to related
# objects must be kept INNER. So, we have both an INNER and a LEFT join
# in the query.
self.assertEqual(str(qs.query).count("LEFT"), 1)
self.assertEqual(str(qs.query).count("INNER"), 1)
self.assertSequenceEqual(qs, [p2])
|
NullableRelOrderingTests
|
python
|
getsentry__sentry
|
tests/acceptance/test_accept_organization_invite.py
|
{
"start": 510,
"end": 7983
}
|
class ____(AcceptanceTestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(name="Rowdy Tiger", owner=None)
self.team = self.create_team(organization=self.org, name="Mariachi Band")
self.member = self.create_member(
user=None,
email="bar@example.com",
organization=self.org,
role="owner",
teams=[self.team],
)
def _sign_in_user(self, email: str, password: str) -> None:
"""
Helper method to sign in a user with given email and password.
"""
self.browser.find_element(By.ID, "id_username").send_keys(email)
self.browser.find_element(By.ID, "id_password").send_keys(password)
self.browser.find_element(By.XPATH, "//button[contains(text(), 'Sign In')]").click()
def test_invite_simple(self) -> None:
self.login_as(self.user)
self.browser.get(self.member.get_invite_link().split("/", 3)[-1])
self.browser.wait_until('[data-test-id="accept-invite"]')
assert self.browser.element_exists('[data-test-id="join-organization"]')
def test_invite_not_authenticated(self) -> None:
self.browser.get(self.member.get_invite_link().split("/", 3)[-1])
self.browser.wait_until('[data-test-id="accept-invite"]')
assert self.browser.element_exists('[data-test-id="create-account"]')
def test_invite_2fa_enforced_org(self) -> None:
self.org.update(flags=F("flags").bitor(Organization.flags.require_2fa))
self.browser.get(self.member.get_invite_link().split("/", 3)[-1])
self.browser.wait_until('[data-test-id="accept-invite"]')
assert not self.browser.element_exists_by_test_id("2fa-warning")
self.login_as(self.user)
self.org.update(flags=F("flags").bitor(Organization.flags.require_2fa))
self.browser.get(self.member.get_invite_link().split("/", 3)[-1])
self.browser.wait_until('[data-test-id="accept-invite"]')
assert self.browser.element_exists_by_test_id("2fa-warning")
def test_invite_sso_org(self) -> None:
AuthProvider.objects.create(organization_id=self.org.id, provider="google")
self.browser.get(self.member.get_invite_link().split("/", 3)[-1])
self.browser.wait_until('[data-test-id="accept-invite"]')
assert self.browser.element_exists_by_test_id("action-info-sso")
assert self.browser.element_exists('[data-test-id="sso-login"]')
@override_settings(SENTRY_SINGLE_ORGANIZATION=True)
def test_authenticated_user_already_member_of_an_org_accept_invite_other_org(self) -> None:
"""
Test that an authenticated user already part of an organization can accept an invite to another organization.
"""
# Setup: Create a second user and make them a member of an organization
email = "dummy@example.com"
password = "dummy"
user2 = self.create_user(email=email)
user2.set_password(password)
user2.save()
self.create_organization(name="Second Org", owner=user2)
# Action: Invite User2 to the first organization
new_member = self.create_member(
user=None,
email=user2.email,
organization=self.org,
role="owner",
teams=[self.team],
)
self.login_as(user2)
# Simulate the user accessing the invite link
self.browser.get(new_member.get_invite_link().split("/", 3)[-1])
self.browser.wait_until('[data-test-id="accept-invite"]')
self.browser.click('button[data-test-id="join-organization"]')
assert self.browser.wait_until('[aria-label="Create project"]')
@override_settings(SENTRY_SINGLE_ORGANIZATION=True)
def test_not_authenticated_user_already_member_of_an_org_accept_invite_other_org(self) -> None:
"""
Test that a not authenticated user already part of an organization can accept an invite to another organization.
"""
# Setup: Create a second user and make them a member of an organization
email = "dummy@example.com"
password = "dummy"
user2 = self.create_user(email=email)
user2.set_password(password)
user2.save()
self.create_organization(name="Second Org", owner=user2)
# Action: Invite User2 to the first organization
new_member = self.create_member(
user=None,
email=user2.email,
organization=self.org,
role="member",
teams=[self.team],
)
# Simulate the user accessing the invite link
self.browser.get(new_member.get_invite_link().split("/", 3)[-1])
self.browser.wait_until('[data-test-id="accept-invite"]')
# Choose to login with existing account
self.browser.click('a[data-test-id="link-with-existing"]')
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
# Handle form validation: Prevent default invalid event blocking
self.browser.driver.execute_script(
"document.addEventListener('invalid', function(e) { e.preventDefault(); }, true);"
)
# Login
self._sign_in_user(email, password)
self.browser.wait_until('[data-test-id="join-organization"]')
# Display the acceptance view for the invitation to join a new organization
assert self.browser.element_exists(f"[aria-label='Join the {self.org.slug} organization']")
@override_settings(SENTRY_SINGLE_ORGANIZATION=True)
def test_existing_user_invite_2fa_enforced_org(self) -> None:
"""
Test that a user who has an existing Sentry account can accept an invite to another organization
and is required to go through the 2FA configuration view.
"""
self.org.update(flags=F("flags").bitor(Organization.flags.require_2fa))
# Setup: Create a second user and make them a member of an organization
email = "dummy@example.com"
password = "dummy"
user2 = self.create_user(email=email)
user2.set_password(password)
user2.save()
self.create_organization(name="Second Org", owner=user2)
# Action: Invite User2 to the first organization
new_member = self.create_member(
user=None,
email=user2.email,
organization=self.org,
role="owner",
teams=[self.team],
)
# Simulate the user accessing the invite link
self.browser.get(new_member.get_invite_link().split("/", 3)[-1])
self.browser.wait_until('[data-test-id="accept-invite"]')
# Accept the invitation using the existing account
self.browser.click('a[data-test-id="link-with-existing"]')
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
# Handle form validation: Prevent default invalid event blocking
self.browser.driver.execute_script(
"document.addEventListener('invalid', function(e) { e.preventDefault(); }, true);"
)
# Login using existing credentials
self._sign_in_user(email, password)
self.browser.wait_until('[data-test-id="2fa-warning"]')
# Display the 2FA configuration view
assert self.browser.element_exists("[aria-label='Configure Two-Factor Auth']")
|
AcceptOrganizationInviteTest
|
python
|
python-attrs__attrs
|
src/attr/validators.py
|
{
"start": 13203,
"end": 15117
}
|
class ____:
bound = attrib()
compare_op = attrib()
compare_func = attrib()
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if not self.compare_func(value, self.bound):
msg = f"'{attr.name}' must be {self.compare_op} {self.bound}: {value}"
raise ValueError(msg)
def __repr__(self):
return f"<Validator for x {self.compare_op} {self.bound}>"
def lt(val):
"""
A validator that raises `ValueError` if the initializer is called with a
number larger or equal to *val*.
The validator uses `operator.lt` to compare the values.
Args:
val: Exclusive upper bound for values.
.. versionadded:: 21.3.0
"""
return _NumberValidator(val, "<", operator.lt)
def le(val):
"""
A validator that raises `ValueError` if the initializer is called with a
number greater than *val*.
The validator uses `operator.le` to compare the values.
Args:
val: Inclusive upper bound for values.
.. versionadded:: 21.3.0
"""
return _NumberValidator(val, "<=", operator.le)
def ge(val):
"""
A validator that raises `ValueError` if the initializer is called with a
number smaller than *val*.
The validator uses `operator.ge` to compare the values.
Args:
val: Inclusive lower bound for values
.. versionadded:: 21.3.0
"""
return _NumberValidator(val, ">=", operator.ge)
def gt(val):
"""
A validator that raises `ValueError` if the initializer is called with a
number smaller or equal to *val*.
The validator uses `operator.gt` to compare the values.
Args:
val: Exclusive lower bound for values
.. versionadded:: 21.3.0
"""
return _NumberValidator(val, ">", operator.gt)
@attrs(repr=False, frozen=True, slots=True)
|
_NumberValidator
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_print_options05.py
|
{
"start": 315,
"end": 1310
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("print_options05.xlsx")
self.ignore_files = [
"xl/printerSettings/printerSettings1.bin",
"xl/worksheets/_rels/sheet1.xml.rels",
]
self.ignore_elements = {
"[Content_Types].xml": ['<Default Extension="bin"'],
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"],
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with print options."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.hide_gridlines(0)
worksheet.center_horizontally()
worksheet.center_vertically()
worksheet.print_row_col_headers()
worksheet.write("A1", "Foo")
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
getsentry__sentry
|
tests/sentry/web/frontend/test_organization_auth_settings.py
|
{
"start": 31981,
"end": 33936
}
|
class ____(AuthProviderTestCase):
provider = DummyGenericSAML2Provider
provider_name = "saml2_generic_dummy"
def setUp(self) -> None:
super().setUp()
self.user = self.create_user("foobar@sentry.io")
self.organization = self.create_organization(owner=self.user, name="saml2-org")
self.auth_provider_inst = AuthProvider.objects.create(
provider=self.provider_name,
config=dummy_provider_config,
organization_id=self.organization.id,
)
def test_update_generic_saml2_config(self) -> None:
self.login_as(self.user, organization_id=self.organization.id)
expected_provider_config = {
"idp": {
"entity_id": "https://foobar.com/saml/metadata/4321",
"x509cert": "bar_x509_cert",
"sso_url": "http://foobar.com/sso_url",
"slo_url": "http://foobar.com/slo_url",
},
"attribute_mapping": {
Attributes.IDENTIFIER: "new_user_id",
Attributes.USER_EMAIL: "new_email",
Attributes.FIRST_NAME: "new_first_name",
Attributes.LAST_NAME: "new_last_name",
},
}
configure_path = reverse(
"sentry-organization-auth-provider-settings", args=[self.organization.slug]
)
payload = {
**expected_provider_config["idp"],
**expected_provider_config["attribute_mapping"],
}
resp = self.client.post(configure_path, payload)
assert resp.status_code == 200
actual = AuthProvider.objects.get(id=self.auth_provider_inst.id)
assert actual.config == expected_provider_config
assert actual.config != self.auth_provider_inst.config
assert actual.provider == self.auth_provider_inst.provider
assert actual.flags == self.auth_provider_inst.flags
|
OrganizationAuthSettingsGenericSAML2Test
|
python
|
plotly__plotly.py
|
plotly/graph_objs/layout/map/_bounds.py
|
{
"start": 235,
"end": 4620
}
|
class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.map"
_path_str = "layout.map.bounds"
_valid_props = {"east", "north", "south", "west"}
@property
def east(self):
"""
Sets the maximum longitude of the map (in degrees East) if
`west`, `south` and `north` are declared.
The 'east' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["east"]
@east.setter
def east(self, val):
self["east"] = val
@property
def north(self):
"""
Sets the maximum latitude of the map (in degrees North) if
`east`, `west` and `south` are declared.
The 'north' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["north"]
@north.setter
def north(self, val):
self["north"] = val
@property
def south(self):
"""
Sets the minimum latitude of the map (in degrees North) if
`east`, `west` and `north` are declared.
The 'south' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["south"]
@south.setter
def south(self, val):
self["south"] = val
@property
def west(self):
"""
Sets the minimum longitude of the map (in degrees East) if
`east`, `south` and `north` are declared.
The 'west' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["west"]
@west.setter
def west(self, val):
self["west"] = val
@property
def _prop_descriptions(self):
return """\
east
Sets the maximum longitude of the map (in degrees East)
if `west`, `south` and `north` are declared.
north
Sets the maximum latitude of the map (in degrees North)
if `east`, `west` and `south` are declared.
south
Sets the minimum latitude of the map (in degrees North)
if `east`, `west` and `north` are declared.
west
Sets the minimum longitude of the map (in degrees East)
if `east`, `south` and `north` are declared.
"""
def __init__(
self, arg=None, east=None, north=None, south=None, west=None, **kwargs
):
"""
Construct a new Bounds object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.map.Bounds`
east
Sets the maximum longitude of the map (in degrees East)
if `west`, `south` and `north` are declared.
north
Sets the maximum latitude of the map (in degrees North)
if `east`, `west` and `south` are declared.
south
Sets the minimum latitude of the map (in degrees North)
if `east`, `west` and `north` are declared.
west
Sets the minimum longitude of the map (in degrees East)
if `east`, `south` and `north` are declared.
Returns
-------
Bounds
"""
super().__init__("bounds")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.map.Bounds
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.map.Bounds`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("east", arg, east)
self._set_property("north", arg, north)
self._set_property("south", arg, south)
self._set_property("west", arg, west)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Bounds
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_type_checking/runtime_evaluated_decorators_1.py
|
{
"start": 303,
"end": 353
}
|
class ____:
x: datetime.datetime
@attrs.define
|
A
|
python
|
eventlet__eventlet
|
tests/greendns_test.py
|
{
"start": 15361,
"end": 17084
}
|
class ____(tests.LimitedTestCase):
def setUp(self):
base_resolver = _make_mock_base_resolver()
base_resolver.rr.address = '1.2.3.4'
self._old_resolver = greendns.resolver
greendns.resolver = base_resolver()
def tearDown(self):
greendns.resolver = self._old_resolver
def test_A(self):
ans = greendns.resolve('host.example.com', socket.AF_INET)
assert ans[0].address == '1.2.3.4'
assert greendns.resolver.args == ('host.example.com', dns.rdatatype.A)
def test_AAAA(self):
greendns.resolver.rr6.address = 'dead:beef::1'
ans = greendns.resolve('host.example.com', socket.AF_INET6)
assert ans[0].address == 'dead:beef::1'
assert greendns.resolver.args == ('host.example.com', dns.rdatatype.AAAA)
def test_unknown_rdtype(self):
with tests.assert_raises(socket.gaierror):
greendns.resolve('host.example.com', socket.AF_INET6 + 1)
def test_timeout(self):
greendns.resolver.raises = greendns.dns.exception.Timeout
with tests.assert_raises(socket.gaierror):
greendns.resolve('host.example.com')
def test_exc(self):
greendns.resolver.raises = greendns.dns.exception.DNSException
with tests.assert_raises(socket.gaierror):
greendns.resolve('host.example.com')
def test_noraise_noanswer(self):
greendns.resolver.rrset = None
ans = greendns.resolve('example.com', raises=False)
assert not ans.rrset
def test_noraise_nxdomain(self):
greendns.resolver.raises = greendns.dns.resolver.NXDOMAIN
ans = greendns.resolve('example.com', raises=False)
assert not ans.rrset
|
TestResolve
|
python
|
huggingface__transformers
|
tests/models/chinese_clip/test_image_processing_chinese_clip.py
|
{
"start": 3196,
"end": 5378
}
|
class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
fast_image_processing_class = ChineseCLIPImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_center_crop"))
self.assertTrue(hasattr(image_processing, "center_crop"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 224, "width": 224})
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
self.assertEqual(image_processor.size, {"shortest_edge": 42})
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
@unittest.skip(
reason="ChineseCLIPImageProcessor doesn't treat 4 channel PIL and numpy consistently yet"
) # FIXME Amy
def test_call_numpy_4_channels(self):
pass
@require_torch
@require_vision
|
ChineseCLIPImageProcessingTest
|
python
|
has2k1__plotnine
|
plotnine/geoms/geom_dotplot.py
|
{
"start": 625,
"end": 8718
}
|
class ____(geom):
    """
    Dot plot

    {usage}

    Parameters
    ----------
    {common_parameters}
    stackdir : Literal["up", "down", "center", "centerwhole"], default="up"
        Direction in which to stack the dots. Options are
    stackratio : float, default=1
        How close to stack the dots. If value is less than 1,
        the dots overlap, if greater than 1 they are spaced.
    dotsize : float, default=1
        Diameter of dots relative to `binwidth`.
    stackgroups : bool, default=False
        If `True`{.py}, the dots are stacked across groups.

    See Also
    --------
    plotnine.stat_bindot : The default `stat` for this `geom`.
    """

    DEFAULT_AES = {"alpha": 1, "color": "black", "fill": "black"}
    REQUIRED_AES = {"x", "y"}
    NON_MISSING_AES = {"size", "shape"}
    DEFAULT_PARAMS = {
        "stat": "bindot",
        "position": "identity",
        "na_rm": False,
        "stackdir": "up",
        "stackratio": 1,
        "dotsize": 1,
        "stackgroups": False,
    }

    legend_key_size = staticmethod(geom_path.legend_key_size)

    def setup_data(self, data: pd.DataFrame) -> pd.DataFrame:
        """
        Prepare the data for drawing.

        Expands binned counts into one row per dot, assigns every dot a
        position within its stack (`stackpos`) and computes the bounding
        box of each stack of dots.
        """
        gp = self.params
        sp = self._stat.params

        # Issue warnings when parameters don't make sense
        if gp["position"] == "stack":
            warn(
                # Fixed message: previously contained a stray double
                # quote (doesn"t) instead of an apostrophe.
                'position="stack" doesn\'t work properly with '
                "geom_dotplot. Use stackgroups=True instead.",
                PlotnineWarning,
            )

        if (
            gp["stackgroups"]
            and sp["method"] == "dotdensity"
            and sp["binpositions"] == "bygroup"
        ):
            warn(
                "geom_dotplot called with stackgroups=TRUE and "
                'method="dotdensity". You probably want to set '
                'binpositions="all"',
                PlotnineWarning,
            )

        if "width" not in data:
            if sp["width"]:
                data["width"] = sp["width"]
            else:
                data["width"] = resolution(data["x"], False) * 0.9

        # Set up the stacking function and range
        if gp["stackdir"] in (None, "up"):

            def stackdots(a: FloatSeries) -> FloatSeries:
                return a - 0.5

            stackaxismin: float = 0
            stackaxismax: float = 1
        elif gp["stackdir"] == "down":

            def stackdots(a: FloatSeries) -> FloatSeries:
                return -a + 0.5

            stackaxismin = -1
            stackaxismax = 0
        elif gp["stackdir"] == "center":

            def stackdots(a: FloatSeries) -> FloatSeries:
                return a - 1 - np.max(a - 1) / 2

            stackaxismin = -0.5
            stackaxismax = 0.5
        elif gp["stackdir"] == "centerwhole":

            def stackdots(a: FloatSeries) -> FloatSeries:
                return a - 1 - np.floor(np.max(a - 1) / 2)

            stackaxismin = -0.5
            stackaxismax = 0.5
        else:
            raise ValueError(f"Invalid value stackdir={gp['stackdir']}")

        # Fill the bins: at a given x (or y),
        # if count=3, make 3 entries at that x
        idx = [i for i, c in enumerate(data["count"]) for _ in range(int(c))]
        data = data.iloc[idx]
        data.reset_index(inplace=True, drop=True)

        # Next part will set the position of each dot within each stack
        # If stackgroups=TRUE, split only on x (or y) and panel;
        # if not stacking, also split by group
        groupvars = [sp["binaxis"], "PANEL"]
        if not gp["stackgroups"]:
            groupvars.append("group")

        # Within each x, or x+group, set countidx=1,2,3,
        # and set stackpos according to stack function
        def func(df: pd.DataFrame) -> pd.DataFrame:
            df["countidx"] = range(1, len(df) + 1)
            df["stackpos"] = stackdots(df["countidx"])
            return df

        # Within each x, or x+group, set countidx=1,2,3, and set
        # stackpos according to stack function
        data = groupby_apply(data, groupvars, func)

        # Set the bounding boxes for the dots
        if sp["binaxis"] == "x":
            # ymin, ymax, xmin, and xmax define the bounding
            # rectangle for each stack. Can't do bounding box per dot,
            # because y position isn't real.
            # After position code is rewritten, each dot should have
            # its own bounding box.
            data["xmin"] = data["x"] - data["binwidth"] / 2
            data["xmax"] = data["x"] + data["binwidth"] / 2
            data["ymin"] = stackaxismin
            data["ymax"] = stackaxismax
            data["y"] = 0
        elif sp["binaxis"] == "y":
            # ymin, ymax, xmin, and xmax define the bounding
            # rectangle for each stack. Can't do bounding box per dot,
            # because x position isn't real.
            # xmin and xmax aren't really the x bounds. They're just
            # set to the standard x +- width/2 so that dot clusters
            # can be dodged like other geoms.
            # After position code is rewritten, each dot should have
            # its own bounding box.
            def func(df: pd.DataFrame) -> pd.DataFrame:
                df["ymin"] = df["y"].min() - data["binwidth"][0] / 2
                df["ymax"] = df["y"].max() + data["binwidth"][0] / 2
                return df

            data = groupby_apply(data, "group", func)
            data["xmin"] = data["x"] + data["width"] * stackaxismin
            data["xmax"] = data["x"] + data["width"] * stackaxismax
        return data

    @staticmethod
    def draw_group(
        data: pd.DataFrame,
        panel_params: panel_view,
        coord: coord,
        ax: Axes,
        params: dict[str, Any],
    ):
        """
        Draw the dots of one group as a collection of ellipses.

        The width/height of each ellipse is corrected by the axes aspect
        ratio so the dots render as circles on screen.
        """
        from matplotlib.collections import PatchCollection
        from matplotlib.patches import Ellipse

        data = coord.transform(data, panel_params)
        fill = to_rgba(data["fill"], data["alpha"])
        color = to_rgba(data["color"], data["alpha"])
        ranges = coord.range(panel_params)

        # For perfect circles the width/height of the circle(ellipse)
        # should factor in the dimensions of axes
        bbox = ax.get_window_extent().transformed(
            ax.figure.dpi_scale_trans.inverted()
        )
        ax_width, ax_height = bbox.width, bbox.height

        factor = (ax_width / ax_height) * np.ptp(ranges.y) / np.ptp(ranges.x)
        size = data["binwidth"].iloc[0] * params["dotsize"]
        offsets = data["stackpos"] * params["stackratio"]

        if params["binaxis"] == "x":
            width, height = size, size * factor
            xpos, ypos = data["x"], data["y"] + height * offsets
        elif params["binaxis"] == "y":
            width, height = size / factor, size
            xpos, ypos = data["x"] + width * offsets, data["y"]
        else:
            # Fixed message: previously read "Invalid valid value ..."
            raise ValueError(
                f"Invalid value binaxis={params['binaxis']}"
            )

        circles = []
        for xy in zip(xpos, ypos):
            patch = Ellipse(xy, width=width, height=height)
            circles.append(patch)
        coll = PatchCollection(
            circles,
            edgecolors=color,
            facecolors=fill,
            rasterized=params["raster"],
        )
        ax.add_collection(coll)

    @staticmethod
    def draw_legend(
        data: pd.Series[Any], da: DrawingArea, lyr: layer
    ) -> DrawingArea:
        """
        Draw a point in the box

        Parameters
        ----------
        data : Series
            Data Row
        da : DrawingArea
            Canvas
        lyr : layer
            Layer

        Returns
        -------
        out : DrawingArea
        """
        from matplotlib.lines import Line2D

        fill = to_rgba(data["fill"], data["alpha"])
        key = Line2D(
            [0.5 * da.width],
            [0.5 * da.height],
            marker="o",
            markersize=da.width / 2,
            markerfacecolor=fill,
            markeredgecolor=data["color"],
        )
        da.add_artist(key)
        return da
|
geom_dotplot
|
python
|
spyder-ide__spyder
|
spyder/api/widgets/menus.py
|
{
"start": 1826,
"end": 17530
}
|
class ____(QMenu, SpyderFontsMixin):
    """
    A QMenu subclass to implement additional functionality for Spyder.
    """

    # Registry of every created menu as (parent, title, instance) tuples.
    MENUS = []
    # Subclasses set this to True for top-level application menus.
    APP_MENU = False
    HORIZONTAL_MARGIN_FOR_ITEMS = 2 * AppStyle.MarginSize
    HORIZONTAL_PADDING_FOR_ITEMS = 3 * AppStyle.MarginSize

    def __init__(
        self,
        parent: Optional[QWidget] = None,
        menu_id: Optional[str] = None,
        title: Optional[str] = None,
        min_width: Optional[int] = None,
        reposition: Optional[bool] = True,
    ):
        """
        Create a menu for Spyder.

        Parameters
        ----------
        parent: QWidget or None
            The menu's parent
        menu_id: str
            Unique str identifier for the menu.
        title: str or None
            Localized text string for the menu.
        min_width: int or None
            Minimum width for the menu.
        reposition: bool, optional (default True)
            Whether to vertically reposition the menu due to it's padding.
        """
        # NOTE(review): plain attributes are assigned before the Qt base
        # class __init__ below; confirm this is safe with the Qt bindings
        # in use.
        self._parent = parent
        self.menu_id = menu_id
        self._title = title
        self._reposition = reposition

        # Ordered list of section ids in this menu.
        self._sections = []
        # Ordered list of (section, action) tuples.
        self._actions = []
        # Maps item ids to their action/menu objects.
        self._actions_map = {}
        # Actions whose `before` anchor is not yet part of the menu,
        # keyed by the anchor's id; re-added on render.
        self._unintroduced_actions = {}
        # Maps a not-yet-introduced section to the section that must
        # appear after it.
        self._after_sections = {}

        # True when the menu changed since the last render.
        self._dirty = False
        self._is_shown = False
        self._is_submenu = False
        self._in_app_menu = False

        if title is None:
            super().__init__(parent)
        else:
            super().__init__(title, parent)
        self.MENUS.append((parent, title, self))

        # Set min width
        if min_width is not None:
            self.setMinimumWidth(min_width)

        # Signals
        self.aboutToShow.connect(self.render)

        # Adjustmens for Mac
        if sys.platform == 'darwin':
            # Needed to enable the dynamic population of actions in app menus
            # in the aboutToShow signal.
            # See spyder-ide/spyder#14612
            if self.APP_MENU:
                self.addAction(QAction(self))

            # Necessary to follow Mac's HIG for app menus.
            self.aboutToShow.connect(self._set_icons)

        # Style
        self.css = self._generate_stylesheet()
        self.setStyleSheet(self.css.toString())

        style = SpyderMenuProxyStyle(None)
        style.setParent(self)
        self.setStyle(style)

    # ---- Public API
    # -------------------------------------------------------------------------
    def clear_actions(self):
        """
        Remove actions from the menu (including custom references)

        Returns
        -------
        None.
        """
        self.clear()
        self._sections = []
        self._actions = []
        self._actions_map = {}
        self._unintroduced_actions = {}
        self._after_sections = {}

    def add_action(self: T,
                   action: Union[SpyderAction, T],
                   section: Optional[str] = None,
                   before: Optional[str] = None,
                   before_section: Optional[str] = None,
                   check_before: bool = True,
                   omit_id: bool = False):
        """
        Add action to a given menu section.

        Parameters
        ----------
        action: SpyderAction
            The action to add.
        section: str or None
            The section id in which to insert the `action`.
        before: str
            Make the action appear before the given action identifier.
        before_section: str or None
            Make the item section (if provided) appear before another
            given section.
        check_before: bool
            Check if the `before` action is part of the menu. This is
            necessary to avoid an infinite recursion when adding
            unintroduced actions with this method again.
        omit_id: bool
            If True, then the menu will check if the item to add declares an
            id, False otherwise. This flag exists only for items added on
            Spyder 4 plugins. Default: False
        """
        item_id = None
        if isinstance(action, SpyderAction) or hasattr(action, 'action_id'):
            item_id = action.action_id

            # This is necessary when we set a menu for `action`, e.g. for
            # todo_list_action in EditorMainWidget.
            if action.menu() and isinstance(action.menu(), SpyderMenu):
                action.menu()._is_submenu = True
        elif isinstance(action, SpyderMenu) or hasattr(action, 'menu_id'):
            item_id = action.menu_id
            action._is_submenu = True

        if not omit_id and item_id is None and action is not None:
            raise AttributeError(f'Item {action} must declare an id.')

        if before is None:
            self._actions.append((section, action))
        else:
            new_actions = []
            added = False
            before_item = self._actions_map.get(before, None)

            # Insert `action` right before the anchor item, preserving
            # the order of everything else.
            for sec, act in self._actions:
                if before_item is not None and act == before_item:
                    added = True
                    new_actions.append((section, action))
                new_actions.append((sec, act))

            # Actions can't be added to the menu if the `before` action is
            # not part of it yet. That's why we need to save them in the
            # `_unintroduced_actions` dict, so we can add them again when
            # the menu is rendered.
            if not added and check_before:
                before_actions = self._unintroduced_actions.get(before, [])
                before_actions.append((section, action))
                self._unintroduced_actions[before] = before_actions

            self._actions = new_actions

        if section not in self._sections:
            self._add_section(section, before_section)

        # Track state of menu to avoid re-rendering if menu has not changed
        self._dirty = True
        self._actions_map[item_id] = action

    def remove_action(self, item_id: str):
        """Remove the action identified by `item_id` from the menu."""
        if item_id in self._actions_map:
            action = self._actions_map.pop(item_id)
            position = None

            # Find the action's position in the ordered list.
            for i, (_, act) in enumerate(self._actions):
                if act == action:
                    position = i
                    break

            if position is not None:
                self._actions.pop(position)
                self._dirty = True

    def get_title(self):
        """
        Return the title for menu.
        """
        return self._title

    def get_actions(self):
        """
        Return a parsed list of menu actions.

        Includes MENU_SEPARATOR taking into account the sections defined.
        """
        actions = []
        for section in self._sections:
            for (sec, action) in self._actions:
                if sec == section:
                    actions.append(action)
            # A separator after every section.
            actions.append(MENU_SEPARATOR)
        return actions

    def get_sections(self):
        """
        Return a tuple of menu sections.
        """
        return tuple(self._sections)

    # ---- Private API
    # -------------------------------------------------------------------------
    def _add_missing_actions(self):
        """
        Add actions that were not introduced to the menu because a `before`
        action they require is not part of it.
        """
        for before, actions in self._unintroduced_actions.items():
            for section, action in actions:
                self.add_action(
                    action,
                    section=section,
                    before=before,
                    check_before=False
                )

        self._unintroduced_actions = {}

    def render(self, force=False):
        """
        Create the menu prior to showing it. This takes into account sections
        and location of menus.

        Parameters
        ----------
        force: bool, optional
            Whether to force rendering the menu.
        """
        if self._dirty or force:
            self.clear()
            self._add_missing_actions()

            actions = self.get_actions()
            add_actions(self, actions)
            self._set_icons()

            self._dirty = False

    def _add_section(self, section, before_section=None):
        """
        Add a new section to the list of sections in this menu.

        Parameters
        ----------
        before_section: str or None
            Make `section` appear before another one.
        """
        inserted_before_other = False

        if before_section is not None:
            if before_section in self._sections:
                # If before_section was already introduced, we simply need to
                # insert the new section on its position, which will put it
                # exactly behind before_section.
                idx = self._sections.index(before_section)
                self._sections.insert(idx, section)
                inserted_before_other = True
            else:
                # If before_section hasn't been introduced yet, we know we need
                # to insert it after section when it's finally added to the
                # menu. So, we preserve that info in the _after_sections dict.
                self._after_sections[before_section] = section

                # Append section to the list of sections because we assume
                # people build menus from top to bottom, i.e. they add its
                # upper sections first.
                self._sections.append(section)
        else:
            self._sections.append(section)

        # Check if section should be inserted after another one, according to
        # what we have in _after_sections.
        after_section = self._after_sections.pop(section, None)

        if after_section is not None:
            if not inserted_before_other:
                # Insert section to the right of after_section, if it was not
                # inserted before another one.
                if section in self._sections:
                    self._sections.remove(section)

                index = self._sections.index(after_section)
                self._sections.insert(index + 1, section)
            else:
                # If section was already inserted before another one, then we
                # need to move after_section to its left instead.
                if after_section in self._sections:
                    self._sections.remove(after_section)

                idx = self._sections.index(section)
                self._sections.insert(idx, after_section)

    def _set_icons(self):
        """
        Unset menu icons for app menus and set them for regular menus.

        This is necessary only for Mac to follow its Human Interface
        Guidelines (HIG), which don't recommend icons in app menus.
        """
        if sys.platform == "darwin":
            if self.APP_MENU or self._in_app_menu:
                set_menu_icons(self, False, in_app_menu=True)
            else:
                set_menu_icons(self, True)

    @classmethod
    def _generate_stylesheet(cls):
        """Generate base stylesheet for menus."""
        css = qstylizer.style.StyleSheet()
        font = cls.get_font(SpyderFontType.Interface)

        # Add padding and border to follow modern standards
        css.QMenu.setValues(
            # Only add top and bottom padding so that menu separators can go
            # completely from the left to right border.
            paddingTop=f'{2 * AppStyle.MarginSize}px',
            paddingBottom=f'{2 * AppStyle.MarginSize}px',
            # This uses the same color as the separator
            border=f"1px solid {SpyderPalette.COLOR_BACKGROUND_6}"
        )

        # Set the right background color. This is the only way to do it!
        css['QWidget:disabled QMenu'].setValues(
            backgroundColor=SpyderPalette.COLOR_BACKGROUND_3,
        )

        # Add padding around separators to prevent that hovering on items hides
        # them.
        css["QMenu::separator"].setValues(
            # Only add top and bottom margins so that the separators can go
            # completely from the left to right border.
            margin=f'{2 * AppStyle.MarginSize}px 0px',
        )

        # Set menu item properties
        # Per-platform tweaks so items have the same visual height.
        delta_top = 0 if (MAC or WIN) else 1
        delta_bottom = 0 if MAC else (2 if WIN else 1)
        css["QMenu::item"].setValues(
            height='1.1em' if MAC else ('1.35em' if WIN else '1.25em'),
            marginLeft=f'{cls.HORIZONTAL_MARGIN_FOR_ITEMS}px',
            marginRight=f'{cls.HORIZONTAL_MARGIN_FOR_ITEMS}px',
            paddingTop=f'{AppStyle.MarginSize + delta_top}px',
            paddingBottom=f'{AppStyle.MarginSize + delta_bottom}px',
            paddingLeft=f'{cls.HORIZONTAL_PADDING_FOR_ITEMS}px',
            paddingRight=f'{cls.HORIZONTAL_PADDING_FOR_ITEMS}px',
            fontFamily=font.family(),
            fontSize=f'{font.pointSize()}pt',
            backgroundColor='transparent'
        )

        # Set hover and pressed state of items
        for state in ['selected', 'pressed']:
            if state == 'selected':
                bg_color = SpyderPalette.COLOR_BACKGROUND_4
            else:
                bg_color = SpyderPalette.COLOR_BACKGROUND_5

            css[f"QMenu::item:{state}"].setValues(
                backgroundColor=bg_color,
                borderRadius=SpyderPalette.SIZE_BORDER_RADIUS
            )

        # Set disabled state of items
        for state in ['disabled', 'selected:disabled']:
            css[f"QMenu::item:{state}"].setValues(
                color=SpyderPalette.COLOR_DISABLED,
                backgroundColor="transparent"
            )

        return css

    def __str__(self):
        return f"SpyderMenu('{self.menu_id}')"

    def __repr__(self):
        return f"SpyderMenu('{self.menu_id}')"

    def _adjust_menu_position(self):
        """Menu position adjustment logic to follow custom style."""
        if not self._is_shown:
            # Reposition submenus vertically due to padding and border
            if self._reposition and self._is_submenu:
                self.move(
                    self.pos().x(),
                    # Current vertical pos - padding - border
                    self.pos().y() - 2 * AppStyle.MarginSize - 1
                )
            self._is_shown = True

        # Reposition menus horizontally due to border
        if self.APP_MENU:
            delta_x = 0 if MAC else 3
        else:
            if QCursor().pos().x() - self.pos().x() < 40:
                # If the difference between the current cursor x position and
                # the menu one is small, it means the menu will be shown to the
                # right, so we need to move it in that direction.
                delta_x = 1
            else:
                # This happens when the menu is shown to the left.
                delta_x = -1

        self.move(self.pos().x() + delta_x, self.pos().y())

    # ---- Qt methods
    # -------------------------------------------------------------------------
    def showEvent(self, event):
        """Call adjustments when the menu is going to be shown."""
        # To prevent race conditions which can cause partially showing a menu
        # (as in spyder-ide/spyder#22266), we use a timer to queue the move
        # related events after the menu is shown.
        # For more info you can check:
        # * https://forum.qt.io/topic/23381/showevent-not-working/3
        # * https://stackoverflow.com/a/49351518
        QTimer.singleShot(0, self._adjust_menu_position)
        # NOTE(review): super().showEvent(event) is not called here; Qt
        # still shows the menu, but confirm the omission is intentional.
|
SpyderMenu
|
python
|
doocs__leetcode
|
solution/1800-1899/1812.Determine Color of a Chessboard Square/Solution.py
|
{
"start": 0,
"end": 139
}
|
class ____:
    def squareIsWhite(self, coordinates: str) -> bool:
        """Return True if the chessboard square named by ``coordinates``
        (e.g. ``"a1"``) is white.

        A square is white exactly when the file letter and rank digit
        have code points of opposite parity, i.e. their XOR is odd.
        """
        file_char, rank_char = coordinates
        return (ord(file_char) ^ ord(rank_char)) & 1 == 1
|
Solution
|
python
|
weaviate__weaviate-python-client
|
weaviate/exceptions.py
|
{
"start": 428,
"end": 898
}
|
class ____(Exception):
    """Root of the Weaviate exception hierarchy.

    All Weaviate exceptions inherit from this class, so catching it
    catches any error raised by the client.
    """

    def __init__(self, message: str = ""):
        """Initialize the base exception.

        Args:
            message (str): An error message specific to the context in which the error occurred.
        """
        super().__init__(message)
        # Also expose the message as a plain attribute for convenience.
        self.message = message
|
WeaviateBaseError
|
python
|
sympy__sympy
|
sympy/assumptions/cnf.py
|
{
"start": 2030,
"end": 2751
}
|
class ____:
    """
    A low-level implementation for Or
    """

    def __init__(self, *args):
        self._args = args

    @property
    def args(self):
        # Canonical order: sort by string form so equality and hashing
        # do not depend on construction order.
        return sorted(self._args, key=str)

    def rcall(self, expr):
        """Apply ``rcall(expr)`` to every argument and rebuild."""
        return type(self)(*[arg.rcall(expr)
                            for arg in self._args
                            ])

    def __invert__(self):
        # De Morgan: ~(a | b) == ~a & ~b
        return AND(*[~arg for arg in self._args])

    def __hash__(self):
        return hash((type(self).__name__,) + tuple(self.args))

    def __eq__(self, other):
        # Fix: return NotImplemented for foreign types instead of
        # raising AttributeError on `other.args` (Python data model).
        # This also keeps __eq__ consistent with __hash__, which already
        # distinguishes classes by name.
        if not isinstance(other, type(self)):
            return NotImplemented
        return self.args == other.args

    def __str__(self):
        s = '(' + ' | '.join([str(arg) for arg in self.args]) + ')'
        return s

    __repr__ = __str__
|
OR
|
python
|
pdm-project__pdm
|
src/pdm/cli/commands/venv/purge.py
|
{
"start": 261,
"end": 2387
}
|
class ____(BaseCommand):
    """Purge selected/all created Virtualenvs"""

    arguments = (verbose_option,)

    def add_arguments(self, parser: argparse.ArgumentParser) -> None:
        """Register the --force and --interactive command-line flags."""
        parser.add_argument(
            "-f",
            "--force",
            action="store_true",
            help="Force purging without prompting for confirmation",
        )
        parser.add_argument(
            "-i",
            "--interactive",
            action="store_true",
            help="Interactively purge selected Virtualenvs",
        )

    def handle(self, project: Project, options: argparse.Namespace) -> None:
        """List the central virtualenvs and delete all or a selected one."""
        all_central_venvs = list(iter_central_venvs(project))
        if not all_central_venvs:
            project.core.ui.echo("No virtualenvs to purge, quitting.", style="success")
            return

        # Show what will be removed unless the user forced the purge.
        if not options.force:
            project.core.ui.echo("The following Virtualenvs will be purged:", style="warning")
            for i, venv in enumerate(all_central_venvs):
                project.core.ui.echo(f"{i}. [success]{venv[0]}[/]")

        if not options.interactive:
            if options.force or termui.confirm("continue?", default=True):
                return self.del_all_venvs(project)
        # NOTE(review): when not interactive and the confirmation above is
        # declined, control falls through to the interactive selection
        # prompt below -- confirm this is intentional.

        # Interactive path: pick one venv by index, or "all"/"none".
        selection = termui.ask(
            "Please select",
            choices=([str(i) for i in range(len(all_central_venvs))] + ["all", "none"]),
            default="none",
            show_choices=False,
        )

        if selection == "all":
            self.del_all_venvs(project)
        elif selection != "none":
            # venv is a (name, path) tuple; remove the path of the chosen one.
            for i, venv in enumerate(all_central_venvs):
                if i == int(selection):
                    shutil.rmtree(venv[1])
            project.core.ui.echo("Purged successfully!")

    def del_all_venvs(self, project: Project) -> None:
        """Delete every central virtualenv directory of the project."""
        saved_python = project._saved_python
        for _, venv in iter_central_venvs(project):
            shutil.rmtree(venv)
            # Forget the saved interpreter if it lived inside a purged venv
            # (presumably saved_python points at <venv>/bin/python or
            # <venv>\Scripts\python.exe -- TODO confirm).
            if saved_python and Path(saved_python).parent.parent == venv:
                project._saved_python = None
        project.core.ui.echo("Purged successfully!")
|
PurgeCommand
|
python
|
streamlit__streamlit
|
lib/streamlit/testing/v1/element_tree.py
|
{
"start": 17970,
"end": 18142
}
|
class ____(HeadingBase):
    """Heading element fixed to the ``"header"`` type.

    Thin subclass of ``HeadingBase`` that only supplies the type tag.
    """

    def __init__(self, proto: HeadingProto, root: ElementTree) -> None:
        # Delegate everything to HeadingBase with the "header" discriminator.
        super().__init__(proto, root, "header")
@dataclass(repr=False)
|
Header
|
python
|
scrapy__scrapy
|
tests/test_feedexport.py
|
{
"start": 2593,
"end": 4940
}
|
class ____:
    """Tests for FileFeedStorage: file:// URIs, direct paths, directory
    creation, append/overwrite semantics and the IFeedStorage interface."""

    def test_store_file_uri(self, tmp_path):
        path = tmp_path / "file.txt"
        uri = path_to_file_uri(str(path))
        self._assert_stores(FileFeedStorage(uri), path)

    def test_store_file_uri_makedirs(self, tmp_path):
        # Intermediate directories must be created on demand.
        path = tmp_path / "more" / "paths" / "file.txt"
        uri = path_to_file_uri(str(path))
        self._assert_stores(FileFeedStorage(uri), path)

    def test_store_direct_path(self, tmp_path):
        path = tmp_path / "file.txt"
        self._assert_stores(FileFeedStorage(str(path)), path)

    def test_store_direct_path_relative(self, tmp_path):
        # Relative paths resolve against the CWD, so chdir into tmp_path
        # and restore the original CWD afterwards.
        old_cwd = Path.cwd()
        try:
            os.chdir(tmp_path)
            path = Path("foo", "bar")
            self._assert_stores(FileFeedStorage(str(path)), path)
        finally:
            os.chdir(old_cwd)

    def test_interface(self, tmp_path):
        # The storage must satisfy the IFeedStorage zope interface.
        path = tmp_path / "file.txt"
        st = FileFeedStorage(str(path))
        verifyObject(IFeedStorage, st)

    @staticmethod
    def _store(path: Path, feed_options: dict[str, Any] | None = None) -> None:
        # Helper: write b"content" to `path` through a fresh storage.
        storage = FileFeedStorage(str(path), feed_options=feed_options)
        spider = scrapy.Spider("default")
        file = storage.open(spider)
        file.write(b"content")
        storage.store(file)

    def test_append(self, tmp_path):
        # Default mode appends to an existing file.
        path = tmp_path / "file.txt"
        self._store(path)
        self._assert_stores(FileFeedStorage(str(path)), path, b"contentcontent")

    def test_overwrite(self, tmp_path):
        # With overwrite=True the second store replaces the first.
        path = tmp_path / "file.txt"
        self._store(path, {"overwrite": True})
        self._assert_stores(
            FileFeedStorage(str(path), feed_options={"overwrite": True}), path
        )

    @staticmethod
    def _assert_stores(
        storage: FileFeedStorage, path: Path, expected_content: bytes = b"content"
    ) -> None:
        # Helper: store b"content" and assert the file ends up holding
        # `expected_content`; the file is removed afterwards.
        spider = scrapy.Spider("default")
        file = storage.open(spider)
        file.write(b"content")
        storage.store(file)
        assert path.exists()
        try:
            assert path.read_bytes() == expected_content
        finally:
            path.unlink()

    def test_preserves_windows_path_without_file_scheme(self):
        # A drive-letter path must not be mangled by URI parsing.
        path = r"C:\Users\user\Desktop\test.txt"
        storage = FileFeedStorage(path)
        assert storage.path == path
|
TestFileFeedStorage
|
python
|
EpistasisLab__tpot
|
tpot/tpot_estimator/estimator.py
|
{
"start": 2501,
"end": 56515
}
|
class ____(BaseEstimator):
    def __init__(self,
                 search_space,
                 scorers,
                 scorers_weights,
                 classification,
                 cv = 10,
                 # NOTE: these two list defaults are shared across calls (mutable
                 # default), but they are never mutated in this class, only unpacked.
                 other_objective_functions=[],
                 other_objective_functions_weights = [],
                 objective_function_names = None,
                 bigger_is_better = True,
                 export_graphpipeline = False,
                 memory = None,
                 categorical_features = None,
                 preprocessing = False,
                 population_size = 50,
                 initial_population_size = None,
                 population_scaling = .5,
                 generations_until_end_population = 1,
                 generations = None,
                 max_time_mins=60,
                 max_eval_time_mins=10,
                 validation_strategy = "none",
                 validation_fraction = .2,
                 disable_label_encoder = False,
                 #early stopping parameters
                 early_stop = None,
                 scorers_early_stop_tol = 0.001,
                 other_objectives_early_stop_tol =None,
                 threshold_evaluation_pruning = None,
                 threshold_evaluation_scaling = .5,
                 selection_evaluation_pruning = None,
                 selection_evaluation_scaling = .5,
                 min_history_threshold = 20,
                 #evolver parameters
                 survival_percentage = 1,
                 crossover_probability=.2,
                 mutate_probability=.7,
                 mutate_then_crossover_probability=.05,
                 crossover_then_mutate_probability=.05,
                 survival_selector = survival_select_NSGA2,
                 parent_selector = tournament_selection_dominated,
                 #budget parameters
                 budget_range = None,
                 budget_scaling = .5,
                 generations_until_end_budget = 1,
                 stepwise_steps = 5,
                 #dask parameters
                 n_jobs=1,
                 memory_limit = None,
                 client = None,
                 processes = True,
                 #debugging and logging parameters
                 warm_start = False,
                 periodic_checkpoint_folder = None,
                 callback = None,
                 verbose = 0,
                 scatter = True,
                 # random seed for random number generator (rng)
                 random_state = None,
                 ):
        '''
        An sklearn baseestimator that uses genetic programming to optimize a pipeline.

        Parameters
        ----------
        search_space : (String, tpot.search_spaces.SearchSpace)
            - String : The default search space to use for the optimization.
            | String | Description |
            | :--- | :----: |
            | linear | A linear pipeline with the structure of "Selector->(transformers+Passthrough)->(classifiers/regressors+Passthrough)->final classifier/regressor." For both the transformer and inner estimator layers, TPOT may choose one or more transformers/classifiers, or it may choose none. The inner classifier/regressor layer is optional. |
            | linear-light | Same search space as linear, but without the inner classifier/regressor layer and with a reduced set of faster running estimators. |
            | graph | TPOT will optimize a pipeline in the shape of a directed acyclic graph. The nodes of the graph can include selectors, scalers, transformers, or classifiers/regressors (inner classifiers/regressors can optionally be not included). This will return a custom GraphPipeline rather than an sklearn Pipeline. More details in Tutorial 6. |
            | graph-light | Same as graph search space, but without the inner classifier/regressors and with a reduced set of faster running estimators. |
            | mdr |TPOT will search over a series of feature selectors and Multifactor Dimensionality Reduction models to find a series of operators that maximize prediction accuracy. The TPOT MDR configuration is specialized for genome-wide association studies (GWAS), and is described in detail online here.
            Note that TPOT MDR may be slow to run because the feature selection routines are computationally expensive, especially on large datasets. |
            - SearchSpace : The search space to use for the optimization. This should be an instance of a SearchSpace.
            The search space to use for the optimization. This should be an instance of a SearchSpace.
            TPOT has groups of search spaces found in the following folders, tpot.search_spaces.nodes for the nodes in the pipeline and tpot.search_spaces.pipelines for the pipeline structure.
        scorers : (list, scorer)
            A scorer or list of scorers to be used in the cross-validation process.
            see https://scikit-learn.org/stable/modules/model_evaluation.html
        scorers_weights : list
            A list of weights to be applied to the scorers during the optimization process.
        classification : bool
            If True, the problem is treated as a classification problem. If False, the problem is treated as a regression problem.
            Used to determine the CV strategy.
        cv : int, cross-validator
            - (int): Number of folds to use in the cross-validation process. By uses the sklearn.model_selection.KFold cross-validator for regression and StratifiedKFold for classification. In both cases, shuffled is set to True.
            - (sklearn.model_selection.BaseCrossValidator): A cross-validator to use in the cross-validation process.
            - max_depth (int): The maximum depth from any node to the root of the pipelines to be generated.
        other_objective_functions : list, default=[]
            A list of other objective functions to apply to the pipeline. The function takes a single parameter for the graphpipeline estimator and returns either a single score or a list of scores.
        other_objective_functions_weights : list, default=[]
            A list of weights to be applied to the other objective functions.
        objective_function_names : list, default=None
            A list of names to be applied to the objective functions. If None, will use the names of the objective functions.
        bigger_is_better : bool, default=True
            If True, the objective function is maximized. If False, the objective function is minimized. Use negative weights to reverse the direction.
        memory: Memory object or string, default=None
            If supplied, pipeline will cache each transformer after calling fit with joblib.Memory. This feature
            is used to avoid computing the fit transformers within a pipeline if the parameters
            and input data are identical with another fitted pipeline during optimization process.
            - String 'auto':
                TPOT uses memory caching with a temporary directory and cleans it up upon shutdown.
            - String path of a caching directory
                TPOT uses memory caching with the provided directory and TPOT does NOT clean
                the caching directory up upon shutdown. If the directory does not exist, TPOT will
                create it.
            - Memory object:
                TPOT uses the instance of joblib.Memory for memory caching,
                and TPOT does NOT clean the caching directory up upon shutdown.
            - None:
                TPOT does not use memory caching.
        categorical_features: list or None
            Categorical columns to inpute and/or one hot encode during the preprocessing step. Used only if preprocessing is not False.
            - None : If None, TPOT will automatically use object columns in pandas dataframes as objects for one hot encoding in preprocessing.
            - List of categorical features. If X is a dataframe, this should be a list of column names. If X is a numpy array, this should be a list of column indices
        preprocessing : bool or BaseEstimator/Pipeline,
            EXPERIMENTAL - will be changed in future versions
            A pipeline that will be used to preprocess the data before CV. Note that the parameters for these steps are not optimized. Add them to the search space to be optimized.
            - bool : If True, will use a default preprocessing pipeline which includes imputation followed by one hot encoding.
            - Pipeline : If an instance of a pipeline is given, will use that pipeline as the preprocessing pipeline.
        population_size : int, default=50
            Size of the population
        initial_population_size : int, default=None
            Size of the initial population. If None, population_size will be used.
        population_scaling : int, default=0.5
            Scaling factor to use when determining how fast we move the threshold moves from the start to end percentile.
        generations_until_end_population : int, default=1
            Number of generations until the population size reaches population_size
        generations : int, default=None
            Number of generations to run
        max_time_mins : float, default=60
            Maximum time to run the optimization. If none or inf, will run until the end of the generations.
        max_eval_time_mins : float, default=10
            Maximum time to evaluate a single individual. If none or inf, there will be no time limit per evaluation.
        validation_strategy : str, default='none'
            EXPERIMENTAL The validation strategy to use for selecting the final pipeline from the population. TPOT may overfit the cross validation score. A second validation set can be used to select the final pipeline.
            - 'auto' : Automatically determine the validation strategy based on the dataset shape.
            - 'reshuffled' : Use the same data for cross validation and final validation, but with different splits for the folds. This is the default for small datasets.
            - 'split' : Use a separate validation set for final validation. Data will be split according to validation_fraction. This is the default for medium datasets.
            - 'none' : Do not use a separate validation set for final validation. Select based on the original cross-validation score. This is the default for large datasets.
        validation_fraction : float, default=0.2
            EXPERIMENTAL The fraction of the dataset to use for the validation set when validation_strategy is 'split'. Must be between 0 and 1.
        disable_label_encoder : bool, default=False
            If True, TPOT will check if the target needs to be relabeled to be sequential ints from 0 to N. This is necessary for XGBoost compatibility. If the labels need to be encoded, TPOT will use sklearn.preprocessing.LabelEncoder to encode the labels. The encoder can be accessed via the self.label_encoder_ attribute.
            If False, no additional label encoders will be used.
        early_stop : int, default=None
            Number of generations without improvement before early stopping. All objectives must have converged within the tolerance for this to be triggered. In general a value of around 5-20 is good.
        scorers_early_stop_tol :
            -list of floats
                list of tolerances for each scorer. If the difference between the best score and the current score is less than the tolerance, the individual is considered to have converged
                If an index of the list is None, that item will not be used for early stopping
            -int
                If an int is given, it will be used as the tolerance for all objectives
        other_objectives_early_stop_tol :
            -list of floats
                list of tolerances for each of the other objective function. If the difference between the best score and the current score is less than the tolerance, the individual is considered to have converged
                If an index of the list is None, that item will not be used for early stopping
            -int
                If an int is given, it will be used as the tolerance for all objectives
        threshold_evaluation_pruning : list [start, end], default=None
            starting and ending percentile to use as a threshold for the evaluation early stopping.
            Values between 0 and 100.
        threshold_evaluation_scaling : float [0,inf), default=0.5
            A scaling factor to use when determining how fast we move the threshold moves from the start to end percentile.
            Must be greater than zero. Higher numbers will move the threshold to the end faster.
        selection_evaluation_pruning : list, default=None
            A lower and upper percent of the population size to select each round of CV.
            Values between 0 and 1.
        selection_evaluation_scaling : float, default=0.5
            A scaling factor to use when determining how fast we move the threshold moves from the start to end percentile.
            Must be greater than zero. Higher numbers will move the threshold to the end faster.
        min_history_threshold : int, default=20
            The minimum number of previous scores needed before using threshold early stopping.
        survival_percentage : float, default=1
            Percentage of the population size to utilize for mutation and crossover at the beginning of the generation. The rest are discarded. Individuals are selected with the selector passed into survival_selector. The value of this parameter must be between 0 and 1, inclusive.
            For example, if the population size is 100 and the survival percentage is .5, 50 individuals will be selected with NSGA2 from the existing population. These will be used for mutation and crossover to generate the next 100 individuals for the next generation. The remainder are discarded from the live population. In the next generation, there will now be the 50 parents + the 100 individuals for a total of 150. Surivival percentage is based of the population size parameter and not the existing population size (current population size when using successive halving). Therefore, in the next generation we will still select 50 individuals from the currently existing 150.
        crossover_probability : float, default=.2
            Probability of generating a new individual by crossover between two individuals.
        mutate_probability : float, default=.7
            Probability of generating a new individual by crossover between one individuals.
        mutate_then_crossover_probability : float, default=.05
            Probability of generating a new individual by mutating two individuals followed by crossover.
        crossover_then_mutate_probability : float, default=.05
            Probability of generating a new individual by crossover between two individuals followed by a mutation of the resulting individual.
        survival_selector : function, default=survival_select_NSGA2
            Function to use to select individuals for survival. Must take a matrix of scores and return selected indexes.
            Used to selected population_size * survival_percentage individuals at the start of each generation to use for mutation and crossover.
        parent_selector : function, default=parent_select_NSGA2
            Function to use to select pairs parents for crossover and individuals for mutation. Must take a matrix of scores and return selected indexes.
        budget_range : list [start, end], default=None
            A starting and ending budget to use for the budget scaling.
        budget_scaling float : [0,1], default=0.5
            A scaling factor to use when determining how fast we move the budget from the start to end budget.
        generations_until_end_budget : int, default=1
            The number of generations to run before reaching the max budget.
        stepwise_steps : int, default=5
            The number of staircase steps to take when scaling the budget and population size.
        n_jobs : int, default=1
            Number of processes to run in parallel.
        memory_limit : str, default=None
            Memory limit for each job. See Dask [LocalCluster documentation](https://distributed.dask.org/en/stable/api.html#distributed.Client) for more information.
        client : dask.distributed.Client, default=None
            A dask client to use for parallelization. If not None, this will override the n_jobs and memory_limit parameters. If None, will create a new client with num_workers=n_jobs and memory_limit=memory_limit.
        processes : bool, default=True
            If True, will use multiprocessing to parallelize the optimization process. If False, will use threading.
            True seems to perform better. However, False is required for interactive debugging.
        warm_start : bool, default=False
            If True, will use the continue the evolutionary algorithm from the last generation of the previous run.
        periodic_checkpoint_folder : str, default=None
            Folder to save the population to periodically. If None, no periodic saving will be done.
            If provided, training will resume from this checkpoint.
        callback : tpot.CallBackInterface, default=None
            Callback object. Not implemented
        verbose : int, default=0
            How much information to print during the optimization process. Higher values include the information from lower values.
            0. nothing
            1. progress bar
            3. best individual
            4. warnings
            >=5. full warnings trace
            6. evaluations progress bar. (Temporary: This used to be 2. Currently, using evaluation progress bar may prevent some instances were we terminate a generation early due to it reaching max_time_mins in the middle of a generation OR a pipeline failed to be terminated normally and we need to manually terminate it.)
        scatter : bool, default=True
            If True, will scatter the data to the dask workers. If False, will not scatter the data. This can be useful for debugging.
        random_state : int, None, default=None
            A seed for reproducability of experiments. This value will be passed to numpy.random.default_rng() to create an instnce of the genrator to pass to other classes
            - int
                Will be used to create and lock in Generator instance with 'numpy.random.default_rng()'
            - None
                Will be used to create Generator for 'numpy.random.default_rng()' where a fresh, unpredictable entropy will be pulled from the OS

        Attributes
        ----------
        fitted_pipeline_ : GraphPipeline
            A fitted instance of the GraphPipeline that inherits from sklearn BaseEstimator. This is fitted on the full X, y passed to fit.
        evaluated_individuals : A pandas data frame containing data for all evaluated individuals in the run.
            Columns:
            - *objective functions : The first few columns correspond to the passed in scorers and objective functions
            - Parents : A tuple containing the indexes of the pipelines used to generate the pipeline of that row. If NaN, this pipeline was generated randomly in the initial population.
            - Variation_Function : Which variation function was used to mutate or crossover the parents. If NaN, this pipeline was generated randomly in the initial population.
            - Individual : The internal representation of the individual that is used during the evolutionary algorithm. This is not an sklearn BaseEstimator.
            - Generation : The generation the pipeline first appeared.
            - Pareto_Front : The nondominated front that this pipeline belongs to. 0 means that its scores is not strictly dominated by any other individual.
                To save on computational time, the best frontier is updated iteratively each generation.
                The pipelines with the 0th pareto front do represent the exact best frontier. However, the pipelines with pareto front >= 1 are only in reference to the other pipelines in the final population.
                All other pipelines are set to NaN.
            - Instance : The unfitted GraphPipeline BaseEstimator.
            - *validation objective functions : Objective function scores evaluated on the validation set.
            - Validation_Pareto_Front : The full pareto front calculated on the validation set. This is calculated for all pipelines with Pareto_Front equal to 0. Unlike the Pareto_Front which only calculates the frontier and the final population, the Validation Pareto Front is calculated for all pipelines tested on the validation set.
        pareto_front : The same pandas dataframe as evaluated individuals, but containing only the frontier pareto front pipelines.
        '''

        # sklearn BaseEstimator must have a corresponding attribute for each parameter.
        # These should not be modified once set (required for get_params/clone to work).
        self.scorers = scorers
        self.scorers_weights = scorers_weights
        self.classification = classification
        self.cv = cv
        self.other_objective_functions = other_objective_functions
        self.other_objective_functions_weights = other_objective_functions_weights
        self.objective_function_names = objective_function_names
        self.bigger_is_better = bigger_is_better
        self.search_space = search_space
        self.export_graphpipeline = export_graphpipeline
        self.memory = memory
        self.categorical_features = categorical_features
        self.preprocessing = preprocessing
        self.validation_strategy = validation_strategy
        self.validation_fraction = validation_fraction
        self.disable_label_encoder = disable_label_encoder
        self.population_size = population_size
        self.initial_population_size = initial_population_size
        self.population_scaling = population_scaling
        self.generations_until_end_population = generations_until_end_population
        self.generations = generations
        self.early_stop = early_stop
        self.scorers_early_stop_tol = scorers_early_stop_tol
        self.other_objectives_early_stop_tol = other_objectives_early_stop_tol
        self.max_time_mins = max_time_mins
        self.max_eval_time_mins = max_eval_time_mins
        self.n_jobs= n_jobs
        self.memory_limit = memory_limit
        self.client = client

        self.survival_percentage = survival_percentage
        self.crossover_probability = crossover_probability
        self.mutate_probability = mutate_probability
        self.mutate_then_crossover_probability= mutate_then_crossover_probability
        self.crossover_then_mutate_probability= crossover_then_mutate_probability
        self.survival_selector=survival_selector
        self.parent_selector=parent_selector

        self.budget_range = budget_range
        self.budget_scaling = budget_scaling
        self.generations_until_end_budget = generations_until_end_budget
        self.stepwise_steps = stepwise_steps

        self.threshold_evaluation_pruning =threshold_evaluation_pruning
        self.threshold_evaluation_scaling = threshold_evaluation_scaling
        self.min_history_threshold = min_history_threshold

        self.selection_evaluation_pruning = selection_evaluation_pruning
        self.selection_evaluation_scaling = selection_evaluation_scaling

        self.warm_start = warm_start
        self.verbose = verbose
        self.periodic_checkpoint_folder = periodic_checkpoint_folder
        self.callback = callback
        self.processes = processes

        self.scatter = scatter

        # Warn if both termination conditions are active: whichever triggers first wins.
        timer_set = self.max_time_mins != float("inf") and self.max_time_mins is not None
        if self.generations is not None and timer_set:
            warnings.warn("Both generations and max_time_mins are set. TPOT will terminate when the first condition is met.")

        # create random number generator based on rngseed
        self.rng = np.random.default_rng(random_state)
        # save random state passed to us for other functions that use random_state
        self.random_state = random_state

        # --- Derived/internal state. Underscored attributes hold resolved values so
        # --- the public parameters above stay exactly as the user passed them.

        # Initialize other used params
        if self.initial_population_size is None:
            self._initial_population_size = self.population_size
        else:
            self._initial_population_size = self.initial_population_size

        # Normalize scorers to a list, then resolve each to a scorer callable.
        if isinstance(self.scorers, str):
            self._scorers = [self.scorers]

        elif callable(self.scorers):
            self._scorers = [self.scorers]
        else:
            self._scorers = self.scorers

        self._scorers = [sklearn.metrics.get_scorer(scoring) for scoring in self._scorers]
        self._scorers_early_stop_tol = self.scorers_early_stop_tol

        self._evolver = tpot.evolvers.BaseEvolver

        # Combined weights: scorer weights first, then other-objective weights
        # (same ordering as self.objective_names below).
        self.objective_function_weights = [*scorers_weights, *other_objective_functions_weights]

        if self.objective_function_names is None:
            obj_names = [f.__name__ for f in other_objective_functions]
        else:
            obj_names = self.objective_function_names
        # sklearn scorers expose the underlying metric via _score_func; fall back to __name__.
        self.objective_names = [f._score_func.__name__ if hasattr(f,"_score_func") else f.__name__ for f in self._scorers] + obj_names

        # Broadcast scalar tolerances to one entry per objective.
        if not isinstance(self.other_objectives_early_stop_tol, list):
            self._other_objectives_early_stop_tol = [self.other_objectives_early_stop_tol for _ in range(len(self.other_objective_functions))]
        else:
            self._other_objectives_early_stop_tol = self.other_objectives_early_stop_tol

        if not isinstance(self._scorers_early_stop_tol, list):
            self._scorers_early_stop_tol = [self._scorers_early_stop_tol for _ in range(len(self._scorers))]
        else:
            self._scorers_early_stop_tol = self._scorers_early_stop_tol

        self.early_stop_tol = [*self._scorers_early_stop_tol, *self._other_objectives_early_stop_tol]

        self._evolver_instance = None
        self.evaluated_individuals = None

        self.label_encoder_ = None

        set_dask_settings()
def fit(self, X, y):
if self.client is not None: #If user passed in a client manually
_client = self.client
else:
if self.verbose >= 4:
silence_logs = 30
elif self.verbose >=5:
silence_logs = 40
else:
silence_logs = 50
cluster = LocalCluster(n_workers=self.n_jobs, #if no client is passed in and no global client exists, create our own
threads_per_worker=1,
processes=self.processes,
silence_logs=silence_logs,
memory_limit=self.memory_limit)
_client = Client(cluster)
if self.classification and not self.disable_label_encoder and not check_if_y_is_encoded(y):
warnings.warn("Labels are not encoded as ints from 0 to N. For compatibility with some classifiers such as sklearn, TPOT has encoded y with the sklearn LabelEncoder. When using pipelines outside the main TPOT estimator class, you can encode the labels with est.label_encoder_")
self.label_encoder_ = LabelEncoder()
y = self.label_encoder_.fit_transform(y)
self.evaluated_individuals = None
#determine validation strategy
if self.validation_strategy == 'auto':
nrows = X.shape[0]
ncols = X.shape[1]
if nrows/ncols < 20:
validation_strategy = 'reshuffled'
elif nrows/ncols < 100:
validation_strategy = 'split'
else:
validation_strategy = 'none'
else:
validation_strategy = self.validation_strategy
if validation_strategy == 'split':
if self.classification:
X, X_val, y, y_val = train_test_split(X, y, test_size=self.validation_fraction, stratify=y, random_state=self.random_state)
else:
X, X_val, y, y_val = train_test_split(X, y, test_size=self.validation_fraction, random_state=self.random_state)
X_original = X
y_original = y
if isinstance(self.cv, int) or isinstance(self.cv, float):
n_folds = self.cv
else:
n_folds = self.cv.get_n_splits(X, y)
if self.classification:
X, y = remove_underrepresented_classes(X, y, n_folds)
if self.preprocessing:
#X = pd.DataFrame(X)
if not isinstance(self.preprocessing, bool) and isinstance(self.preprocessing, sklearn.base.BaseEstimator):
self._preprocessing_pipeline = sklearn.base.clone(self.preprocessing)
#TODO: check if there are missing values in X before imputation. If not, don't include imputation in pipeline. Check if there are categorical columns. If not, don't include one hot encoding in pipeline
else: #if self.preprocessing is True or not a sklearn estimator
pipeline_steps = []
if self.categorical_features is not None: #if categorical features are specified, use those
pipeline_steps.append(("impute_categorical", tpot.builtin_modules.ColumnSimpleImputer(self.categorical_features, strategy='most_frequent')))
pipeline_steps.append(("impute_numeric", tpot.builtin_modules.ColumnSimpleImputer("numeric", strategy='mean')))
pipeline_steps.append(("ColumnOneHotEncoder", tpot.builtin_modules.ColumnOneHotEncoder(self.categorical_features, min_frequency=0.0001))) # retain wrong param fix
else:
if isinstance(X, pd.DataFrame):
categorical_columns = X.select_dtypes(include=['object']).columns
if len(categorical_columns) > 0:
pipeline_steps.append(("impute_categorical", tpot.builtin_modules.ColumnSimpleImputer("categorical", strategy='most_frequent')))
pipeline_steps.append(("impute_numeric", tpot.builtin_modules.ColumnSimpleImputer("numeric", strategy='mean')))
pipeline_steps.append(("ColumnOneHotEncoder", tpot.builtin_modules.ColumnOneHotEncoder("categorical", min_frequency=0.0001))) # retain wrong param fix
else:
pipeline_steps.append(("impute_numeric", tpot.builtin_modules.ColumnSimpleImputer("all", strategy='mean')))
else:
pipeline_steps.append(("impute_numeric", tpot.builtin_modules.ColumnSimpleImputer("all", strategy='mean')))
self._preprocessing_pipeline = sklearn.pipeline.Pipeline(pipeline_steps)
X = self._preprocessing_pipeline.fit_transform(X, y)
else:
self._preprocessing_pipeline = None
#_, y = sklearn.utils.check_X_y(X, y, y_numeric=True)
#Set up the configuation dictionaries and the search spaces
#check if self.cv is a number
if isinstance(self.cv, int) or isinstance(self.cv, float):
if self.classification:
self.cv_gen = sklearn.model_selection.StratifiedKFold(n_splits=self.cv, shuffle=True, random_state=self.random_state)
else:
self.cv_gen = sklearn.model_selection.KFold(n_splits=self.cv, shuffle=True, random_state=self.random_state)
else:
self.cv_gen = sklearn.model_selection.check_cv(self.cv, y, classifier=self.classification)
n_samples= int(math.floor(X.shape[0]/n_folds))
n_features=X.shape[1]
if isinstance(X, pd.DataFrame):
self.feature_names = X.columns
else:
self.feature_names = None
def objective_function(pipeline_individual,
X,
y,
is_classification=self.classification,
scorers= self._scorers,
cv=self.cv_gen,
other_objective_functions=self.other_objective_functions,
export_graphpipeline=self.export_graphpipeline,
memory=self.memory,
**kwargs):
return objective_function_generator(
pipeline_individual,
X,
y,
is_classification=is_classification,
scorers= scorers,
cv=cv,
other_objective_functions=other_objective_functions,
export_graphpipeline=export_graphpipeline,
memory=memory,
**kwargs,
)
if self.threshold_evaluation_pruning is not None or self.selection_evaluation_pruning is not None:
evaluation_early_stop_steps = self.cv
else:
evaluation_early_stop_steps = None
if self.scatter:
X_future = _client.scatter(X)
y_future = _client.scatter(y)
else:
X_future = X
y_future = y
if self.classification:
n_classes = len(np.unique(y))
else:
n_classes = None
get_search_space_params = {"n_classes": n_classes,
"n_samples":len(y),
"n_features":X.shape[1],
"random_state":self.random_state}
self._search_space = get_template_search_spaces(self.search_space, classification=self.classification, inner_predictors=True, **get_search_space_params)
# TODO : Add check for empty values in X and if so, add imputation to the search space
# make this depend on self.preprocessing
# if check_empty_values(X):
# from sklearn.experimental import enable_iterative_imputer
# from ConfigSpace import ConfigurationSpace
# from ConfigSpace import ConfigurationSpace, Integer, Float, Categorical, Normal
# iterative_imputer_cs = ConfigurationSpace(
# space = {
# 'n_nearest_features' : Categorical('n_nearest_features', [100]),
# 'initial_strategy' : Categorical('initial_strategy', ['mean','median', 'most_frequent', ]),
# 'add_indicator' : Categorical('add_indicator', [True, False]),
# }
# )
# imputation_search = tpot.search_spaces.pipelines.ChoicePipeline([
# tpot.config.get_search_space("SimpleImputer"),
# tpot.search_spaces.nodes.EstimatorNode(sklearn.impute.IterativeImputer, iterative_imputer_cs)
# ])
# self.search_space_final = tpot.search_spaces.pipelines.SequentialPipeline(search_spaces=[ imputation_search, self._search_space], memory="sklearn_pipeline_memory")
# else:
# self.search_space_final = self._search_space
self.search_space_final = self._search_space
def ind_generator(rng):
rng = np.random.default_rng(rng)
while True:
yield self.search_space_final.generate(rng)
#If warm start and we have an evolver instance, use the existing one
if not(self.warm_start and self._evolver_instance is not None):
self._evolver_instance = self._evolver( individual_generator=ind_generator(self.rng),
objective_functions= [objective_function],
objective_function_weights = self.objective_function_weights,
objective_names=self.objective_names,
bigger_is_better = self.bigger_is_better,
population_size= self.population_size,
generations=self.generations,
initial_population_size = self._initial_population_size,
n_jobs=self.n_jobs,
verbose = self.verbose,
max_time_mins = self.max_time_mins ,
max_eval_time_mins = self.max_eval_time_mins,
periodic_checkpoint_folder = self.periodic_checkpoint_folder,
threshold_evaluation_pruning = self.threshold_evaluation_pruning,
threshold_evaluation_scaling = self.threshold_evaluation_scaling,
min_history_threshold = self.min_history_threshold,
selection_evaluation_pruning = self.selection_evaluation_pruning,
selection_evaluation_scaling = self.selection_evaluation_scaling,
evaluation_early_stop_steps = evaluation_early_stop_steps,
early_stop_tol = self.early_stop_tol,
early_stop= self.early_stop,
budget_range = self.budget_range,
budget_scaling = self.budget_scaling,
generations_until_end_budget = self.generations_until_end_budget,
population_scaling = self.population_scaling,
generations_until_end_population = self.generations_until_end_population,
stepwise_steps = self.stepwise_steps,
client = _client,
objective_kwargs = {"X": X_future, "y": y_future},
survival_selector=self.survival_selector,
parent_selector=self.parent_selector,
survival_percentage = self.survival_percentage,
crossover_probability = self.crossover_probability,
mutate_probability = self.mutate_probability,
mutate_then_crossover_probability= self.mutate_then_crossover_probability,
crossover_then_mutate_probability= self.crossover_then_mutate_probability,
rng=self.rng,
)
self._evolver_instance.optimize()
#self._evolver_instance.population.update_pareto_fronts(self.objective_names, self.objective_function_weights)
self.make_evaluated_individuals()
tpot.utils.get_pareto_frontier(self.evaluated_individuals, column_names=self.objective_names, weights=self.objective_function_weights)
if validation_strategy == 'reshuffled':
best_pareto_front_idx = list(self.pareto_front.index)
best_pareto_front = list(self.pareto_front.loc[best_pareto_front_idx]['Individual'])
#reshuffle rows
X, y = sklearn.utils.shuffle(X, y, random_state=self.random_state)
if self.scatter:
X_future = _client.scatter(X)
y_future = _client.scatter(y)
else:
X_future = X
y_future = y
val_objective_function_list = [lambda ind,
X,
y,
is_classification=self.classification,
scorers= self._scorers,
cv=self.cv_gen,
other_objective_functions=self.other_objective_functions,
export_graphpipeline=self.export_graphpipeline,
memory=self.memory,
**kwargs: objective_function_generator(
ind,
X,
y,
is_classification=is_classification,
scorers= scorers,
cv=cv,
other_objective_functions=other_objective_functions,
export_graphpipeline=export_graphpipeline,
memory=memory,
**kwargs,
)]
objective_kwargs = {"X": X_future, "y": y_future}
val_scores, start_times, end_times, eval_errors = tpot.utils.eval_utils.parallel_eval_objective_list(best_pareto_front, val_objective_function_list, verbose=self.verbose, max_eval_time_mins=self.max_eval_time_mins, n_expected_columns=len(self.objective_names), client=_client, **objective_kwargs)
val_objective_names = ['validation_'+name for name in self.objective_names]
self.objective_names_for_selection = val_objective_names
self.evaluated_individuals.loc[best_pareto_front_idx,val_objective_names] = val_scores
self.evaluated_individuals.loc[best_pareto_front_idx,'validation_start_times'] = start_times
self.evaluated_individuals.loc[best_pareto_front_idx,'validation_end_times'] = end_times
self.evaluated_individuals.loc[best_pareto_front_idx,'validation_eval_errors'] = eval_errors
self.evaluated_individuals["Validation_Pareto_Front"] = tpot.utils.get_pareto_frontier(self.evaluated_individuals, column_names=val_objective_names, weights=self.objective_function_weights)
elif validation_strategy == 'split':
if self.scatter:
X_future = _client.scatter(X)
y_future = _client.scatter(y)
X_val_future = _client.scatter(X_val)
y_val_future = _client.scatter(y_val)
else:
X_future = X
y_future = y
X_val_future = X_val
y_val_future = y_val
objective_kwargs = {"X": X_future, "y": y_future, "X_val" : X_val_future, "y_val":y_val_future }
best_pareto_front_idx = list(self.pareto_front.index)
best_pareto_front = list(self.pareto_front.loc[best_pareto_front_idx]['Individual'])
val_objective_function_list = [lambda ind,
X,
y,
X_val,
y_val,
scorers= self._scorers,
other_objective_functions=self.other_objective_functions,
export_graphpipeline=self.export_graphpipeline,
memory=self.memory,
**kwargs: val_objective_function_generator(
ind,
X,
y,
X_val,
y_val,
scorers= scorers,
other_objective_functions=other_objective_functions,
export_graphpipeline=export_graphpipeline,
memory=memory,
**kwargs,
)]
val_scores, start_times, end_times, eval_errors = tpot.utils.eval_utils.parallel_eval_objective_list(best_pareto_front, val_objective_function_list, verbose=self.verbose, max_eval_time_mins=self.max_eval_time_mins, n_expected_columns=len(self.objective_names), client=_client, **objective_kwargs)
val_objective_names = ['validation_'+name for name in self.objective_names]
self.objective_names_for_selection = val_objective_names
self.evaluated_individuals.loc[best_pareto_front_idx,val_objective_names] = val_scores
self.evaluated_individuals.loc[best_pareto_front_idx,'validation_start_times'] = start_times
self.evaluated_individuals.loc[best_pareto_front_idx,'validation_end_times'] = end_times
self.evaluated_individuals.loc[best_pareto_front_idx,'validation_eval_errors'] = eval_errors
self.evaluated_individuals["Validation_Pareto_Front"] = tpot.utils.get_pareto_frontier(self.evaluated_individuals, column_names=val_objective_names, weights=self.objective_function_weights)
else:
self.objective_names_for_selection = self.objective_names
val_scores = self.evaluated_individuals[self.evaluated_individuals[self.objective_names_for_selection].isna().all(1).ne(True)][self.objective_names_for_selection]
weighted_scores = val_scores*self.objective_function_weights
if self.bigger_is_better:
best_indices = list(weighted_scores.sort_values(by=self.objective_names_for_selection, ascending=False).index)
else:
best_indices = list(weighted_scores.sort_values(by=self.objective_names_for_selection, ascending=True).index)
for best_idx in best_indices:
best_individual = self.evaluated_individuals.loc[best_idx]['Individual']
self.selected_best_score = self.evaluated_individuals.loc[best_idx]
#TODO
#best_individual_pipeline = best_individual.export_pipeline(memory=self.memory, cross_val_predict_cv=self.cross_val_predict_cv)
if self.export_graphpipeline:
best_individual_pipeline = best_individual.export_flattened_graphpipeline(memory=self.memory)
else:
best_individual_pipeline = best_individual.export_pipeline(memory=self.memory)
if self.preprocessing:
self.fitted_pipeline_ = sklearn.pipeline.make_pipeline(sklearn.base.clone(self._preprocessing_pipeline), best_individual_pipeline )
else:
self.fitted_pipeline_ = best_individual_pipeline
try:
self.fitted_pipeline_.fit(X_original,y_original) #TODO use y_original as well?
break
except Exception as e:
if self.verbose >= 4:
warnings.warn("Final pipeline failed to fit. Rarely, the pipeline might work on the objective function but fail on the full dataset. Generally due to interactions with different features being selected or transformations having different properties. Trying next pipeline")
print(e)
continue
if self.client is None: #no client was passed in
#close cluster and client
# _client.close()
# cluster.close()
try:
_client.shutdown()
cluster.close()
#catch exception
except Exception as e:
print("Error shutting down client and cluster")
Warning(e)
return self
def _estimator_has(attr):
'''Check if we can delegate a method to the underlying estimator.
First, we check the first fitted final estimator if available, otherwise we
check the unfitted final estimator.
'''
return lambda self: (self.fitted_pipeline_ is not None and
hasattr(self.fitted_pipeline_, attr)
)
@available_if(_estimator_has('predict'))
def predict(self, X, **predict_params):
    """Predict with the fitted pipeline.

    Delegates to ``self.fitted_pipeline_.predict``; for classification with
    a fitted label encoder, the encoded predictions are mapped back to the
    original label space before being returned.
    """
    check_is_fitted(self)
    raw_predictions = self.fitted_pipeline_.predict(X, **predict_params)
    if not (self.classification and self.label_encoder_):
        return raw_predictions
    return self.label_encoder_.inverse_transform(raw_predictions)
@available_if(_estimator_has('predict_proba'))
def predict_proba(self, X, **predict_params):
    """Return class-probability estimates from the fitted pipeline."""
    check_is_fitted(self)
    pipeline = self.fitted_pipeline_
    return pipeline.predict_proba(X, **predict_params)
@available_if(_estimator_has('decision_function'))
def decision_function(self, X, **predict_params):
    """Return decision-function scores from the fitted pipeline."""
    check_is_fitted(self)
    pipeline = self.fitted_pipeline_
    return pipeline.decision_function(X, **predict_params)
@available_if(_estimator_has('transform'))
def transform(self, X, **predict_params):
    """Apply the fitted pipeline's transform to ``X``."""
    check_is_fitted(self)
    pipeline = self.fitted_pipeline_
    return pipeline.transform(X, **predict_params)
@property
def classes_(self):
    """The class labels. Only exist if the last step is a classifier."""
    encoder = self.label_encoder_
    if encoder:
        return encoder.classes_
    return self.fitted_pipeline_.classes_
@property
def _estimator_type(self):
    """Expose the underlying fitted pipeline's sklearn estimator type."""
    pipeline = self.fitted_pipeline_
    return pipeline._estimator_type
def __sklearn_tags__(self):
    """Return sklearn estimator tags, delegating to the fitted pipeline.

    When fitted, tags are deep-copied from the final pipeline so that
    sklearn introspection reflects the actual model; otherwise the parent
    class defaults are used. Classification runs are additionally marked
    multi-class and multi-label capable.
    """
    if hasattr(self, 'fitted_pipeline_'):  # if fitted
        try:
            tags = copy.deepcopy(self.fitted_pipeline_.__sklearn_tags__())
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt /
            # SystemExit still propagate. Estimators without
            # __sklearn_tags__ fall back to sklearn's tag resolver.
            tags = copy.deepcopy(get_tags(self.fitted_pipeline_))
    else:  # if not fitted
        tags = super().__sklearn_tags__()
    if self.random_state is None:
        # NOTE(review): marking the estimator deterministic when no seed is
        # set looks inverted -- confirm this is intentional (e.g. to satisfy
        # sklearn estimator checks). Behavior kept as-is.
        tags.non_deterministic = False
    if self.classification:
        if tags.classifier_tags is None:
            tags.classifier_tags = sklearn.utils.ClassifierTags()
        tags.classifier_tags.multi_class = True
        tags.classifier_tags.multi_label = True
    return tags
def make_evaluated_individuals(self):
    """Build and cache the table of evaluated individuals.

    On first call, copies the evolver population's evaluated-individuals
    DataFrame, replaces its object-based index with sequential integers,
    remaps each row's parent references onto those integers, and adds an
    "Instance" column holding an exported pipeline per individual.
    Subsequent calls return the cached table unchanged.

    Returns
    -------
    pandas.DataFrame
        The cached ``evaluated_individuals`` table.
    """
    # check if _evolver_instance exists
    if self.evaluated_individuals is None:
        self.evaluated_individuals = self._evolver_instance.population.evaluated_individuals.copy()
        objects = list(self.evaluated_individuals.index)
        # Map each individual object to a stable integer id for the index.
        object_to_int = dict(zip(objects, range(len(objects))))
        self.evaluated_individuals = self.evaluated_individuals.set_index(self.evaluated_individuals.index.map(object_to_int))
        # Parent tuples referenced individuals by object; rewrite them as ints.
        self.evaluated_individuals['Parents'] = self.evaluated_individuals['Parents'].apply(lambda row: convert_parents_tuples_to_integers(row, object_to_int))
        # Materialize a runnable pipeline per individual (optionally prefixed
        # with the preprocessing pipeline / flattened graph export).
        self.evaluated_individuals["Instance"] = self.evaluated_individuals["Individual"].apply(lambda ind: apply_make_pipeline(ind, preprocessing_pipeline=self._preprocessing_pipeline, export_graphpipeline=self.export_graphpipeline, memory=self.memory))
    return self.evaluated_individuals
@property
def pareto_front(self):
    """Rows of ``evaluated_individuals`` on the Pareto front.

    Returns None before any evaluation has happened; returns the full
    table when no "Pareto_Front" column has been computed yet.
    """
    table = self.evaluated_individuals
    if table is None:
        return None
    if "Pareto_Front" not in table:
        return table
    return table[table["Pareto_Front"] == 1]
def check_empty_values(data):
    """
    Checks for empty values in a dataset.

    Args:
        data (numpy.ndarray or pandas.DataFrame): The dataset to check.

    Returns:
        bool: True if the dataset contains empty values, False otherwise.

    Raises:
        ValueError: If ``data`` is neither a DataFrame nor an ndarray.
    """
    if isinstance(data, pd.DataFrame):
        has_missing = data.isnull().values.any()
    elif isinstance(data, np.ndarray):
        has_missing = np.isnan(data).any()
    else:
        raise ValueError("Unsupported data type")
    return has_missing
|
TPOTEstimator
|
python
|
has2k1__plotnine
|
plotnine/iapi.py
|
{
"start": 8204,
"end": 8411
}
|
class ____:
"""
What is required to layout an inside legend
"""
box: FlexibleAnchoredOffsetbox
justification: tuple[float, float]
position: tuple[float, float]
@dataclass
|
inside_legend
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.