language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | tools/test/test_header_only_linter.py | {
"start": 215,
"end": 3777
} | class ____(unittest.TestCase):
"""
Test the header only linter functionality
"""
def test_find_matched_symbols(self) -> None:
sample_regex = re.compile("symDef|symD|symC|bbb|a")
test_globs = ["tools/test/header_only_linter_testdata/*.cpp"]
expected_matches = {"symDef", "symC", "a"}
self.assertEqual(
find_matched_symbols(sample_regex, test_globs), expected_matches
)
def test_find_matched_symbols_empty_regex(self) -> None:
sample_regex = re.compile("")
test_globs = ["tools/test/header_only_linter_testdata/*.cpp"]
expected_matches: set[str] = set()
self.assertEqual(
find_matched_symbols(sample_regex, test_globs), expected_matches
)
def test_check_file_no_issues(self) -> None:
sample_txt = str(REPO_ROOT / "tools/test/header_only_linter_testdata/good.txt")
test_globs = ["tools/test/header_only_linter_testdata/*.cpp"]
self.assertEqual(len(check_file(sample_txt, test_globs)), 0)
def test_check_empty_file(self) -> None:
sample_txt = str(REPO_ROOT / "tools/test/header_only_linter_testdata/empty.txt")
test_globs = ["tools/test/header_only_linter_testdata/*.cpp"]
self.assertEqual(len(check_file(sample_txt, test_globs)), 0)
def test_check_file_with_untested_symbols(self) -> None:
sample_txt = str(REPO_ROOT / "tools/test/header_only_linter_testdata/bad.txt")
test_globs = ["tools/test/header_only_linter_testdata/*.cpp"]
expected_msgs = [
LintMessage(
path=sample_txt,
line=7,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="[untested-symbol]",
original=None,
replacement=None,
description=(
f"bbb has been included as a header-only API "
"but is not tested in any of CPP_TEST_GLOBS, which "
f"contains {CPP_TEST_GLOBS}.\n"
"Please add a .cpp test using the symbol without "
"linking anything to verify that the symbol is in "
"fact header-only. If you already have a test but it's"
" not found, please add the .cpp file to CPP_TEST_GLOBS"
" in tools/linters/adapters/header_only_linter.py."
),
),
LintMessage(
path=sample_txt,
line=8,
char=None,
code=LINTER_CODE,
severity=LintSeverity.ERROR,
name="[untested-symbol]",
original=None,
replacement=None,
description=(
f"symD has been included as a header-only API "
"but is not tested in any of CPP_TEST_GLOBS, which "
f"contains {CPP_TEST_GLOBS}.\n"
"Please add a .cpp test using the symbol without "
"linking anything to verify that the symbol is in "
"fact header-only. If you already have a test but it's"
" not found, please add the .cpp file to CPP_TEST_GLOBS"
" in tools/linters/adapters/header_only_linter.py."
),
),
]
self.assertEqual(set(check_file(sample_txt, test_globs)), set(expected_msgs))
if __name__ == "__main__":
unittest.main()
| TestHeaderOnlyLinter |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 493268,
"end": 493914
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("PinnedDiscussionEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("PinnedDiscussion"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| PinnedDiscussionConnection |
python | davidhalter__jedi | jedi/inference/gradual/base.py | {
"start": 14075,
"end": 14655
} | class ____(DefineGenericBaseClass):
def __init__(self, parent_context, tree_name, generics_manager):
super().__init__(generics_manager)
self.inference_state = parent_context.inference_state
self.parent_context = parent_context
self._tree_name = tree_name
def _get_wrapped_value(self):
return _PseudoTreeNameClass(self.parent_context, self._tree_name)
def __repr__(self):
return '%s(%s%s)' % (self.__class__.__name__, self._tree_name.value,
self._generics_manager)
| BaseTypingClassWithGenerics |
python | huggingface__transformers | src/transformers/models/perceiver/modeling_perceiver.py | {
"start": 98417,
"end": 98953
} | class ____(nn.Conv2d):
"""
Conv2d layer with padding="same" support. Source:
https://gist.github.com/sumanmichael/4de9dee93f972d47c80c4ade8e149ea6
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.zero_pad_2d = nn.ZeroPad2d(
reduce(__add__, [(k // 2 + (k - 2 * (k // 2)) - 1, k // 2) for k in self.kernel_size[::-1]])
)
def forward(self, input):
return self._conv_forward(self.zero_pad_2d(input), self.weight, self.bias)
| Conv2dSamePadding |
python | pypa__pip | src/pip/_internal/network/auth.py | {
"start": 1645,
"end": 2817
} | class ____(KeyRingBaseProvider):
"""Keyring interface which uses locally imported `keyring`"""
has_keyring = True
def __init__(self) -> None:
import keyring
self.keyring = keyring
def get_auth_info(self, url: str, username: str | None) -> AuthInfo | None:
# Support keyring's get_credential interface which supports getting
# credentials without a username. This is only available for
# keyring>=15.2.0.
if hasattr(self.keyring, "get_credential"):
logger.debug("Getting credentials from keyring for %s", url)
cred = self.keyring.get_credential(url, username)
if cred is not None:
return cred.username, cred.password
return None
if username is not None:
logger.debug("Getting password from keyring for %s", url)
password = self.keyring.get_password(url, username)
if password:
return username, password
return None
def save_auth_info(self, url: str, username: str, password: str) -> None:
self.keyring.set_password(url, username, password)
| KeyRingPythonProvider |
python | joke2k__faker | faker/providers/lorem/ru_RU/__init__.py | {
"start": 68,
"end": 9846
} | class ____(LoremProvider):
"""Implement lorem provider for ``ru_RU`` locale."""
word_list = (
"войти",
"монета",
"вскинуть",
"желание",
"экзамен",
"налоговый",
"вытаскивать",
"приятель",
"вздрагивать",
"куча",
"порт",
"точно",
"заплакать",
"изба",
"правление",
"художественный",
"мучительно",
"изображать",
"фонарик",
"миф",
"грустный",
"опасность",
"мера",
"пастух",
"факультет",
"мелькнуть",
"полевой",
"другой",
"выраженный",
"забирать",
"рот",
"народ",
"соответствие",
"тута",
"коммунизм",
"решение",
"плод",
"собеседник",
"возмутиться",
"достоинство",
"господь",
"болото",
"инфекция",
"голубчик",
"сынок",
"пространство",
"прощение",
"прежде",
"хотеть",
"ленинград",
"даль",
"развитый",
"близко",
"более",
"спорт",
"эпоха",
"ответить",
"освободить",
"совет",
"проход",
"палец",
"вчера",
"приличный",
"ярко",
"белье",
"кузнец",
"неожиданно",
"вперед",
"зато",
"кольцо",
"передо",
"мгновение",
"плавно",
"табак",
"число",
"изучить",
"тяжелый",
"рассуждение",
"салон",
"идея",
"что",
"светило",
"порода",
"сомнительный",
"бок",
"очко",
"неудобно",
"советовать",
"отдел",
"помолчать",
"поздравлять",
"пробовать",
"дошлый",
"смеяться",
"упорно",
"вздрогнуть",
"затянуться",
"танцевать",
"песенка",
"выбирать",
"правильный",
"намерение",
"издали",
"запустить",
"наслаждение",
"крыса",
"лететь",
"космос",
"радость",
"поезд",
"находить",
"гулять",
"горький",
"бочок",
"ночь",
"счастье",
"уничтожение",
"дьявол",
"коробка",
"спасть",
"кожа",
"провинция",
"прелесть",
"тюрьма",
"низкий",
"сверкать",
"темнеть",
"солнце",
"дружно",
"настать",
"блин",
"степь",
"самостоятельно",
"крутой",
"картинка",
"зачем",
"рабочий",
"необычный",
"армейский",
"труп",
"ягода",
"около",
"монета",
"естественный",
"юный",
"район",
"скрытый",
"поймать",
"строительство",
"палата",
"миг",
"триста",
"штаб",
"ломать",
"возможно",
"полюбить",
"человечек",
"легко",
"чувство",
"ручей",
"карман",
"деньги",
"неправда",
"сравнение",
"грудь",
"отъезд",
"возникновение",
"степь",
"возбуждение",
"деловой",
"следовательно",
"жидкий",
"сынок",
"художественный",
"поколение",
"расстегнуть",
"пища",
"ученый",
"секунда",
"успокоиться",
"вряд",
"аж",
"вскакивать",
"мимо",
"падать",
"потянуться",
"угроза",
"растеряться",
"бегать",
"стакан",
"о",
"кпсс",
"ныне",
"пол",
"реклама",
"при",
"школьный",
"премьера",
"дальний",
"потрясти",
"освобождение",
"покидать",
"наступать",
"жить",
"какой",
"обида",
"командование",
"девка",
"выражаться",
"головной",
"второй",
"князь",
"социалистический",
"головка",
"привлекать",
"через",
"господь",
"результат",
"отметить",
"ведь",
"падаль",
"покидать",
"художественный",
"правый",
"висеть",
"лапа",
"каюта",
"слишком",
"нервно",
"серьезный",
"зима",
"заработать",
"эффект",
"пропасть",
"плод",
"что",
"висеть",
"холодно",
"единый",
"выкинуть",
"мрачно",
"выгнать",
"умирать",
"иной",
"космос",
"природа",
"функция",
"поставить",
"оборот",
"услать",
"очередной",
"медицина",
"функция",
"зарплата",
"выдержать",
"расстройство",
"адвокат",
"задержать",
"появление",
"инвалид",
"интеллектуальный",
"исследование",
"господь",
"смертельный",
"спичка",
"вариант",
"рай",
"одиннадцать",
"чем",
"манера",
"магазин",
"поговорить",
"полоска",
"помимо",
"построить",
"домашний",
"механический",
"сохранять",
"отражение",
"научить",
"тесно",
"аллея",
"прежний",
"посидеть",
"славный",
"очутиться",
"лететь",
"невозможно",
"порядок",
"выразить",
"спешить",
"сынок",
"ребятишки",
"угроза",
"оставить",
"цвет",
"налево",
"парень",
"миллиард",
"горький",
"трубка",
"подробность",
"пасть",
"непривычный",
"угодный",
"засунуть",
"цель",
"запретить",
"дремать",
"разуметься",
"приходить",
"совещание",
"постоянный",
"анализ",
"терапия",
"приятель",
"процесс",
"академик",
"металл",
"развернуться",
"жестокий",
"интернет",
"банда",
"изменение",
"коллектив",
"похороны",
"устройство",
"торопливый",
"разводить",
"промолчать",
"подземный",
"пламя",
"редактор",
"теория",
"карандаш",
"упор",
"означать",
"бабочка",
"четыре",
"столетие",
"разнообразный",
"витрина",
"нож",
"команда",
"шлем",
"недостаток",
"протягивать",
"за",
"металл",
"добиться",
"сутки",
"четко",
"предоставить",
"тысяча",
"запеть",
"бригада",
"мелочь",
"выраженный",
"пересечь",
"сходить",
"вообще",
"рис",
"банк",
"бак",
"передо",
"назначить",
"важный",
"правление",
"палка",
"трясти",
"уронить",
"витрина",
"основание",
"да",
"перебивать",
"дыхание",
"применяться",
"июнь",
"бетонный",
"избегать",
"умолять",
"мягкий",
"заявление",
"конференция",
"встать",
"свежий",
"сопровождаться",
"цепочка",
"выражение",
"угол",
"ботинок",
"ложиться",
"инструкция",
"присесть",
"решетка",
"еврейский",
"порог",
"зеленый",
"граница",
"ставить",
"смелый",
"сустав",
"роса",
"демократия",
"вывести",
"конструкция",
"задрать",
"багровый",
"военный",
"направо",
"житель",
"товар",
"неправда",
"материя",
"командующий",
"кидать",
"заложить",
"лиловый",
"слать",
"горький",
"пространство",
"провал",
"мусор",
"наткнуться",
"торговля",
"монета",
"место",
"спалить",
"бровь",
"левый",
"хлеб",
"коричневый",
"потом",
"страсть",
"виднеться",
"роскошный",
"способ",
"костер",
"заведение",
"пропадать",
"слишком",
"пятеро",
"мальчишка",
"тусклый",
"неожиданный",
"плясать",
"дурацкий",
"дрогнуть",
"сбросить",
"прошептать",
"беспомощный",
"рота",
"песня",
"тревога",
"некоторый",
"термин",
"нажать",
"видимо",
"валюта",
"набор",
"боец",
"райком",
"новый",
"скользить",
"руководитель",
"волк",
"изредка",
"понятный",
"пропаганда",
"остановить",
"исполнять",
"ход",
"госпожа",
"печатать",
"командир",
"снимать",
"казнь",
"невыносимый",
"спорт",
"тревога",
"уточнить",
"актриса",
"полностью",
"покинуть",
"сверкающий",
"мотоцикл",
"дорогой",
"указанный",
"ремень",
"посвятить",
"один",
"а",
"доставать",
"хозяйка",
"носок",
"написать",
"еврейский",
"призыв",
"увеличиваться",
"равнодушный",
)
parts_of_speech: Dict[str, tuple] = {}
| Provider |
python | pallets__jinja | tests/test_lexnparse.py | {
"start": 361,
"end": 1139
} | class ____:
test_tokens = [
Token(1, TOKEN_BLOCK_BEGIN, ""),
Token(2, TOKEN_BLOCK_END, ""),
]
def test_simple(self, env):
ts = TokenStream(self.test_tokens, "foo", "bar")
assert ts.current.type is TOKEN_BLOCK_BEGIN
assert bool(ts)
assert not bool(ts.eos)
next(ts)
assert ts.current.type is TOKEN_BLOCK_END
assert bool(ts)
assert not bool(ts.eos)
next(ts)
assert ts.current.type is TOKEN_EOF
assert not bool(ts)
assert bool(ts.eos)
def test_iter(self, env):
token_types = [t.type for t in TokenStream(self.test_tokens, "foo", "bar")]
assert token_types == [
"block_begin",
"block_end",
]
| TestTokenStream |
python | bokeh__bokeh | src/bokeh/models/ui/menus.py | {
"start": 1742,
"end": 3513
} | class ____(Model):
""" A basic menu item with an icon, label, shortcut, sub-menu and an associated action.
Only label is required. All other properties are optional.
"""
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
checked = Nullable(Bool, default=None, help="""
Whether an item is marked as checked/active.
Checked item is represented with a tick mark on the left hand side
of an item. Unchecked item is represented with an empty space.
The menu will allocate a column for check marks for all its items if
at least one item has set a boolean value for ``checked`` property.
""")
icon = Nullable(IconLike, help="""
An optional icon to display left of the label.
""")
label = Required(String, help="""
A plain text string label.
""")
shortcut = Nullable(String, default=None, help="""
An optional string representing the keyboard sequence triggering the action.
.. note::
This is only a UI hint for the user. Menus on their own don't implement
any support for triggering actions based on keyboard inputs.
""")
menu = Nullable(Instance(lambda: Menu), default=None, help="""
An optional sub-menu showed when hovering over this item.
""")
tooltip = Nullable(String, default=None, help="""
An optional plain text description showed when hovering over this item.
""")
disabled = Bool(default=False, help="""
Indicates whether clicking on the item activates the associated action.
""")
action = Nullable(Instance(Callback), default=None, help="""
An optional action (callback) associated with this item.
""")
| MenuItem |
python | davidhalter__jedi | jedi/inference/value/iterable.py | {
"start": 8597,
"end": 8849
} | class ____:
# TODO merge with _DictMixin?
def get_mapping_item_values(self):
return self._dict_keys(), self._dict_values()
def get_key_values(self):
# TODO merge with _dict_keys?
return self._dict_keys()
| _DictKeyMixin |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 28597,
"end": 28781
} | class ____(BaseModel, extra="forbid"):
"""
Delete alias if exists
"""
delete_alias: "DeleteAlias" = Field(..., description="Delete alias if exists")
| DeleteAliasOperation |
python | sympy__sympy | sympy/integrals/manualintegrate.py | {
"start": 13506,
"end": 15161
} | class ____(Rule):
theta: Expr
func: Expr
rewritten: Expr
substep: Rule
restriction: bool | Boolean
def eval(self) -> Expr:
theta, func, x = self.theta, self.func, self.variable
func = func.subs(sec(theta), 1/cos(theta))
func = func.subs(csc(theta), 1/sin(theta))
func = func.subs(cot(theta), 1/tan(theta))
trig_function = list(func.find(TrigonometricFunction))
assert len(trig_function) == 1
trig_function = trig_function[0]
relation = solve(x - func, trig_function)
assert len(relation) == 1
numer, denom = fraction(relation[0])
if isinstance(trig_function, sin):
opposite = numer
hypotenuse = denom
adjacent = sqrt(denom**2 - numer**2)
inverse = asin(relation[0])
elif isinstance(trig_function, cos):
adjacent = numer
hypotenuse = denom
opposite = sqrt(denom**2 - numer**2)
inverse = acos(relation[0])
else: # tan
opposite = numer
adjacent = denom
hypotenuse = sqrt(denom**2 + numer**2)
inverse = atan(relation[0])
substitution = [
(sin(theta), opposite/hypotenuse),
(cos(theta), adjacent/hypotenuse),
(tan(theta), opposite/adjacent),
(theta, inverse)
]
return Piecewise(
(self.substep.eval().subs(substitution).trigsimp(), self.restriction) # type: ignore
)
def contains_dont_know(self) -> bool:
return self.substep.contains_dont_know()
@dataclass
| TrigSubstitutionRule |
python | mlflow__mlflow | mlflow/projects/kubernetes.py | {
"start": 3481,
"end": 6363
} | class ____(SubmittedRun):
"""
Instance of SubmittedRun corresponding to a Kubernetes Job run launched to run an MLflow
project.
Args:
mlflow_run_id: ID of the MLflow project run.
job_name: Kubernetes job name.
job_namespace: Kubernetes job namespace.
"""
# How often to poll run status when waiting on a run
POLL_STATUS_INTERVAL = 5
def __init__(self, mlflow_run_id, job_name, job_namespace):
super().__init__()
self._mlflow_run_id = mlflow_run_id
self._job_name = job_name
self._job_namespace = job_namespace
self._status = RunStatus.SCHEDULED
self._status_lock = RLock()
self._kube_api = kubernetes.client.BatchV1Api()
@property
def run_id(self):
return self._mlflow_run_id
def wait(self):
while not RunStatus.is_terminated(self._update_status()):
time.sleep(self.POLL_STATUS_INTERVAL)
return self._status == RunStatus.FINISHED
def _update_status(self):
api_response = self._kube_api.read_namespaced_job_status(
name=self._job_name, namespace=self._job_namespace, pretty=True
)
status = api_response.status
with self._status_lock:
if RunStatus.is_terminated(self._status):
return self._status
if self._status == RunStatus.SCHEDULED:
if api_response.status.start_time is None:
_logger.info("Waiting for Job to start")
else:
_logger.info("Job started.")
self._status = RunStatus.RUNNING
if status.conditions is not None:
for condition in status.conditions:
if condition.status == "True":
_logger.info(condition.message)
if condition.type == "Failed":
self._status = RunStatus.FAILED
elif condition.type == "Complete":
self._status = RunStatus.FINISHED
return self._status
def get_status(self):
status = self._status
return status if RunStatus.is_terminated(status) else self._update_status()
def cancel(self):
with self._status_lock:
if not RunStatus.is_terminated(self._status):
_logger.info("Cancelling job.")
self._kube_api.delete_namespaced_job(
name=self._job_name,
namespace=self._job_namespace,
body=kubernetes.client.V1DeleteOptions(),
pretty=True,
)
self._status = RunStatus.KILLED
_logger.info("Job cancelled.")
else:
_logger.info("Attempting to cancel a job that is already terminated.")
| KubernetesSubmittedRun |
python | scipy__scipy | scipy/stats/tests/test_generation/reference_distributions.py | {
"start": 15120,
"end": 15417
} | class ____(ReferenceDistribution):
def __init__(self, *, b):
super().__init__(b=b)
def _support(self, b):
return 0, b
def _pdf(self, x, b):
return -mp.exp(-x)/mp.expm1(-b)
def _sf(self, x, b):
return (mp.exp(-b) - mp.exp(-x))/mp.expm1(-b)
| TruncExpon |
python | spack__spack | lib/spack/spack/filesystem_view.py | {
"start": 22857,
"end": 30844
} | class ____(FilesystemView):
"""A simple and partial implementation of FilesystemView focused on performance and immutable
views, where specs cannot be removed after they were added."""
def _sanity_check_view_projection(self, specs):
"""A very common issue is that we end up with two specs of the same package, that project
to the same prefix. We want to catch that as early as possible and give a sensible error to
the user. Here we use the metadata dir (.spack) projection as a quick test to see whether
two specs in the view are going to clash. The metadata dir is used because it's always
added by Spack with identical files, so a guaranteed clash that's easily verified."""
seen = {}
for current_spec in specs:
metadata_dir = self.relative_metadata_dir_for_spec(current_spec)
conflicting_spec = seen.get(metadata_dir)
if conflicting_spec:
raise ConflictingSpecsError(current_spec, conflicting_spec)
seen[metadata_dir] = current_spec
def add_specs(self, *specs, **kwargs) -> None:
"""Link a root-to-leaf topologically ordered list of specs into the view."""
assert all((s.concrete for s in specs))
if len(specs) == 0:
return
# Drop externals
specs = [s for s in specs if not s.external]
self._sanity_check_view_projection(specs)
# Ignore spack meta data folder.
def skip_list(file):
return os.path.basename(file) == spack.store.STORE.layout.metadata_dir
# Determine if the root is on a case-insensitive filesystem
normalize_paths = is_folder_on_case_insensitive_filesystem(self._root)
visitor = SourceMergeVisitor(ignore=skip_list, normalize_paths=normalize_paths)
# Gather all the directories to be made and files to be linked
for spec in specs:
src_prefix = spec.package.view_source()
visitor.set_projection(self.get_relative_projection_for_spec(spec))
visit_directory_tree(src_prefix, visitor)
# Check for conflicts in destination dir.
visit_directory_tree(self._root, DestinationMergeVisitor(visitor))
# Throw on fatal dir-file conflicts.
if visitor.fatal_conflicts:
raise MergeConflictSummary(visitor.fatal_conflicts)
# Inform about file-file conflicts.
if visitor.file_conflicts:
if self.ignore_conflicts:
tty.debug(f"{len(visitor.file_conflicts)} file conflicts")
else:
raise MergeConflictSummary(visitor.file_conflicts)
tty.debug(f"Creating {len(visitor.directories)} dirs and {len(visitor.files)} links")
# Make the directory structure
for dst in visitor.directories:
os.mkdir(os.path.join(self._root, dst))
# Link the files using a "merge map": full src => full dst
merge_map_per_prefix = self._source_merge_visitor_to_merge_map(visitor)
for spec in specs:
merge_map = merge_map_per_prefix.get(spec.package.view_source(), None)
if not merge_map:
# Not every spec may have files to contribute.
continue
spec.package.add_files_to_view(self, merge_map, skip_if_exists=False)
# Finally create the metadata dirs.
self.link_metadata(specs)
def _source_merge_visitor_to_merge_map(self, visitor: SourceMergeVisitor):
# For compatibility with add_files_to_view, we have to create a
# merge_map of the form join(src_root, src_rel) => join(dst_root, dst_rel),
# but our visitor.files format is dst_rel => (src_root, src_rel).
# We exploit that visitor.files is an ordered dict, and files per source
# prefix are contiguous.
source_root = lambda item: item[1][0]
per_source = itertools.groupby(visitor.files.items(), key=source_root)
return {
src_root: {
os.path.join(src_root, src_rel): os.path.join(self._root, dst_rel)
for dst_rel, (_, src_rel) in group
}
for src_root, group in per_source
}
def relative_metadata_dir_for_spec(self, spec):
return os.path.join(
self.get_relative_projection_for_spec(spec),
spack.store.STORE.layout.metadata_dir,
spec.name,
)
def link_metadata(self, specs):
metadata_visitor = SourceMergeVisitor()
for spec in specs:
src_prefix = os.path.join(
spec.package.view_source(), spack.store.STORE.layout.metadata_dir
)
proj = self.relative_metadata_dir_for_spec(spec)
metadata_visitor.set_projection(proj)
visit_directory_tree(src_prefix, metadata_visitor)
# Check for conflicts in destination dir.
visit_directory_tree(self._root, DestinationMergeVisitor(metadata_visitor))
# Throw on dir-file conflicts -- unlikely, but who knows.
if metadata_visitor.fatal_conflicts:
raise MergeConflictSummary(metadata_visitor.fatal_conflicts)
# We are strict here for historical reasons
if metadata_visitor.file_conflicts:
raise MergeConflictSummary(metadata_visitor.file_conflicts)
for dst in metadata_visitor.directories:
os.mkdir(os.path.join(self._root, dst))
for dst_relpath, (src_root, src_relpath) in metadata_visitor.files.items():
self.link(os.path.join(src_root, src_relpath), os.path.join(self._root, dst_relpath))
def get_relative_projection_for_spec(self, spec):
# Extensions are placed by their extendee, not by their own spec
if spec.package.extendee_spec:
spec = spec.package.extendee_spec
p = spack.projections.get_projection(self.projections, spec)
return spec.format_path(p) if p else ""
def get_projection_for_spec(self, spec):
"""
Return the projection for a spec in this view.
Relies on the ordering of projections to avoid ambiguity.
"""
spec = spack.spec.Spec(spec)
if spec.package.extendee_spec:
spec = spec.package.extendee_spec
proj = spack.projections.get_projection(self.projections, spec)
if proj:
return os.path.join(self._root, spec.format_path(proj))
return self._root
#####################
# utility functions #
#####################
def get_spec_from_file(filename) -> Optional[spack.spec.Spec]:
try:
with open(filename, "r", encoding="utf-8") as f:
return spack.spec.Spec.from_yaml(f)
except OSError:
return None
def colorize_root(root):
colorize = ft.partial(tty.color.colorize, color=sys.stdout.isatty())
pre, post = map(colorize, "@M[@. @M]@.".split())
return "".join([pre, root, post])
def colorize_spec(spec):
"Colorize spec output if in TTY."
if sys.stdout.isatty():
return spec.cshort_spec
else:
return spec.short_spec
def find_dependents(all_specs, providers, deptype="run"):
"""
Return a set containing all those specs from all_specs that depend on
providers at the given dependency type.
"""
dependents = set()
for s in all_specs:
for dep in s.traverse(deptype=deptype):
if dep in providers:
dependents.add(s)
return dependents
def filter_exclude(specs, exclude):
"Filter specs given sequence of exclude regex"
to_exclude = [re.compile(e) for e in exclude]
def keep(spec):
for e in to_exclude:
if e.match(spec.name):
return False
return True
return filter(keep, specs)
def get_dependencies(specs):
"Get set of dependencies (includes specs)"
retval = set()
set(map(retval.update, (set(s.traverse()) for s in specs)))
return retval
| SimpleFilesystemView |
python | huggingface__transformers | src/transformers/models/glm4v_moe/modular_glm4v_moe.py | {
"start": 22369,
"end": 22468
} | class ____(Qwen3VLMoeCausalLMOutputWithPast):
pass
@auto_docstring
| Glm4vMoeCausalLMOutputWithPast |
python | joke2k__faker | faker/providers/phone_number/da_DK/__init__.py | {
"start": 49,
"end": 248
} | class ____(PhoneNumberProvider):
formats = (
"+45 ########",
"+45 #### ####",
"+45 ## ## ## ##",
"########",
"#### ####",
"## ## ## ##",
)
| Provider |
python | celery__celery | t/unit/app/test_annotations.py | {
"start": 102,
"end": 137
} | class ____:
foo = 65
| MyAnnotation |
python | numba__numba | numba/cuda/tests/cudadrv/test_deallocations.py | {
"start": 4740,
"end": 4964
} | class ____(CUDATestCase):
def test_context_manager(self):
# just make sure the API is available
with cuda.defer_cleanup():
pass
@skip_on_cudasim('not supported on CUDASIM')
| TestDeferCleanupAvail |
python | kamyu104__LeetCode-Solutions | Python/maximum-partition-factor.py | {
"start": 3644,
"end": 5464
} | class ____(object):
def maxPartitionFactor(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
INF = float("inf")
def binary_search_right(left, right, check):
while left <= right:
mid = left+(right-left)//2
if not check(mid):
right = mid-1
else:
left = mid+1
return right
def dist(u, v):
return abs(points[u][0]-points[v][0])+abs(points[u][1]-points[v][1])
def is_bipartite(d):
def bfs(u):
if lookup[u] != -1:
return True
lookup[u] = 0
q = [u]
while q:
new_q = []
for u in q:
for v in xrange(len(points)):
if not (v != u and dist(v, u) < d):
continue
if lookup[v] != -1:
if lookup[v] != lookup[u]^1:
return False
continue
lookup[v] = lookup[u]^1
new_q.append(v)
q = new_q
return True
lookup = [-1]*len(points)
return all(bfs(u) for u in xrange(len(points)))
sorted_dists = sorted({dist(u, v) for u in xrange(len(points)) for v in xrange(u+1, len(points))}|{INF})
left, right = 0, len(sorted_dists)-1
result = binary_search_right(left, right, lambda i: is_bipartite(sorted_dists[i]))
return sorted_dists[result] if sorted_dists[result] != INF else 0
# Time: O(n^2 * logr)
# Space: O(n)
# binary search, bfs
| Solution3 |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 786450,
"end": 804142
} | class ____(
MarkPropDefnumber, NumericMarkPropDef
):
r"""
FieldOrDatumDefWithConditionMarkPropFieldDefnumber schema wrapper.
Parameters
----------
shorthand : str, dict, Sequence[str], :class:`RepeatRef`
shorthand for field, aggregate, and type
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : bool, dict, :class:`BinParams`, None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
condition : dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`, Sequence[dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`]
One or more value definition(s) with `a parameter or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
legend : dict, :class:`Legend`, None
An object defining properties of the legend. If ``null``, the legend for the
encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
**See also:** `legend <https://vega.github.io/vega-lite/docs/legend.html>`__
documentation.
scale : dict, :class:`Scale`, None
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : dict, :class:`Sort`, Sequence[str], Sequence[bool], Sequence[float], :class:`SortArray`, :class:`SortOrder`, :class:`AllSortString`, :class:`SortByChannel`, :class:`SortByEncoding`, :class:`EncodingSortField`, :class:`SortByChannelDesc`, Sequence[dict, :class:`DateTime`], Literal['-x', '-y', '-color', '-fill', '-stroke', '-strokeWidth', '-size', '-shape', '-fillOpacity', '-strokeOpacity', '-opacity', '-text', 'ascending', 'descending', 'x', 'y', 'color', 'fill', 'stroke', 'strokeWidth', 'size', 'shape', 'fillOpacity', 'strokeOpacity', 'opacity', 'text'], None
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A string indicating an encoding channel name to sort by
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ (e.g.,
``"x"`` or ``"y"``) with an optional minus prefix for descending sort (e.g.,
``"-x"`` to sort by x-field, descending). This channel string is short-form of `a
sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__. For
example, ``"sort": "-x"`` is equivalent to ``"sort": {"encoding": "x", "order":
"descending"}``.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For discrete time field, values in the sort array can be
`date-time definition objects
<https://vega.github.io/vega-lite/docs/datetime.html>`__. In addition, for time
units ``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"``).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` and sorting by another channel is not supported for ``row`` and
``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
        Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
        field or `a temporal field that gets cast as ordinal
        <https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`, Literal['quantitative', 'ordinal', 'temporal', 'nominal']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
          ``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``, ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_schema = {
"$ref": "#/definitions/FieldOrDatumDefWithCondition<MarkPropFieldDef,number>"
}
def __init__(
self,
shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
**kwds,
):
super().__init__(
shorthand=shorthand,
aggregate=aggregate,
bandPosition=bandPosition,
bin=bin,
condition=condition,
field=field,
legend=legend,
scale=scale,
sort=sort,
timeUnit=timeUnit,
title=title,
type=type,
**kwds,
)
| FieldOrDatumDefWithConditionMarkPropFieldDefnumber |
python | getsentry__sentry-python | sentry_sdk/integrations/django/__init__.py | {
"start": 3262,
"end": 18159
} | class ____(Integration):
"""
Auto instrument a Django application.
:param transaction_style: How to derive transaction names. Either `"function_name"` or `"url"`. Defaults to `"url"`.
:param middleware_spans: Whether to create spans for middleware. Defaults to `True`.
:param signals_spans: Whether to create spans for signals. Defaults to `True`.
:param signals_denylist: A list of signals to ignore when creating spans.
:param cache_spans: Whether to create spans for cache operations. Defaults to `False`.
"""
identifier = "django"
origin = f"auto.http.{identifier}"
origin_db = f"auto.db.{identifier}"
transaction_style = ""
middleware_spans = None
signals_spans = None
cache_spans = None
signals_denylist = [] # type: list[signals.Signal]
def __init__(
self,
transaction_style="url", # type: str
middleware_spans=True, # type: bool
signals_spans=True, # type: bool
cache_spans=False, # type: bool
db_transaction_spans=False, # type: bool
signals_denylist=None, # type: Optional[list[signals.Signal]]
http_methods_to_capture=DEFAULT_HTTP_METHODS_TO_CAPTURE, # type: tuple[str, ...]
):
# type: (...) -> None
if transaction_style not in TRANSACTION_STYLE_VALUES:
raise ValueError(
"Invalid value for transaction_style: %s (must be in %s)"
% (transaction_style, TRANSACTION_STYLE_VALUES)
)
self.transaction_style = transaction_style
self.middleware_spans = middleware_spans
self.signals_spans = signals_spans
self.signals_denylist = signals_denylist or []
self.cache_spans = cache_spans
self.db_transaction_spans = db_transaction_spans
self.http_methods_to_capture = tuple(map(str.upper, http_methods_to_capture))
@staticmethod
def setup_once():
# type: () -> None
_check_minimum_version(DjangoIntegration, DJANGO_VERSION)
install_sql_hook()
# Patch in our custom middleware.
# logs an error for every 500
ignore_logger("django.server")
ignore_logger("django.request")
from django.core.handlers.wsgi import WSGIHandler
old_app = WSGIHandler.__call__
@ensure_integration_enabled(DjangoIntegration, old_app)
def sentry_patched_wsgi_handler(self, environ, start_response):
# type: (Any, Dict[str, str], Callable[..., Any]) -> _ScopedResponse
bound_old_app = old_app.__get__(self, WSGIHandler)
from django.conf import settings
use_x_forwarded_for = settings.USE_X_FORWARDED_HOST
integration = sentry_sdk.get_client().get_integration(DjangoIntegration)
middleware = SentryWsgiMiddleware(
bound_old_app,
use_x_forwarded_for,
span_origin=DjangoIntegration.origin,
http_methods_to_capture=(
integration.http_methods_to_capture
if integration
else DEFAULT_HTTP_METHODS_TO_CAPTURE
),
)
return middleware(environ, start_response)
WSGIHandler.__call__ = sentry_patched_wsgi_handler
_patch_get_response()
_patch_django_asgi_handler()
signals.got_request_exception.connect(_got_request_exception)
@add_global_event_processor
def process_django_templates(event, hint):
# type: (Event, Optional[Hint]) -> Optional[Event]
if hint is None:
return event
exc_info = hint.get("exc_info", None)
if exc_info is None:
return event
exception = event.get("exception", None)
if exception is None:
return event
values = exception.get("values", None)
if values is None:
return event
for exception, (_, exc_value, _) in zip(
reversed(values), walk_exception_chain(exc_info)
):
frame = get_template_frame_from_exception(exc_value)
if frame is not None:
frames = exception.get("stacktrace", {}).get("frames", [])
for i in reversed(range(len(frames))):
f = frames[i]
if (
f.get("function") in ("Parser.parse", "parse", "render")
and f.get("module") == "django.template.base"
):
i += 1
break
else:
i = len(frames)
frames.insert(i, frame)
return event
@add_global_repr_processor
def _django_queryset_repr(value, hint):
# type: (Any, Dict[str, Any]) -> Union[NotImplementedType, str]
try:
# Django 1.6 can fail to import `QuerySet` when Django settings
# have not yet been initialized.
#
# If we fail to import, return `NotImplemented`. It's at least
# unlikely that we have a query set in `value` when importing
# `QuerySet` fails.
from django.db.models.query import QuerySet
except Exception:
return NotImplemented
if not isinstance(value, QuerySet) or value._result_cache:
return NotImplemented
return "<%s from %s at 0x%x>" % (
value.__class__.__name__,
value.__module__,
id(value),
)
_patch_channels()
patch_django_middlewares()
patch_views()
patch_templates()
patch_signals()
add_template_context_repr_sequence()
if patch_caching is not None:
patch_caching()
_DRF_PATCHED = False
_DRF_PATCH_LOCK = threading.Lock()
def _patch_drf():
# type: () -> None
"""
Patch Django Rest Framework for more/better request data. DRF's request
type is a wrapper around Django's request type. The attribute we're
interested in is `request.data`, which is a cached property containing a
parsed request body. Reading a request body from that property is more
reliable than reading from any of Django's own properties, as those don't
hold payloads in memory and therefore can only be accessed once.
We patch the Django request object to include a weak backreference to the
DRF request object, such that we can later use either in
`DjangoRequestExtractor`.
This function is not called directly on SDK setup, because importing almost
any part of Django Rest Framework will try to access Django settings (where
`sentry_sdk.init()` might be called from in the first place). Instead we
run this function on every request and do the patching on the first
request.
"""
global _DRF_PATCHED
if _DRF_PATCHED:
# Double-checked locking
return
with _DRF_PATCH_LOCK:
if _DRF_PATCHED:
return
# We set this regardless of whether the code below succeeds or fails.
# There is no point in trying to patch again on the next request.
_DRF_PATCHED = True
with capture_internal_exceptions():
try:
from rest_framework.views import APIView # type: ignore
except ImportError:
pass
else:
old_drf_initial = APIView.initial
def sentry_patched_drf_initial(self, request, *args, **kwargs):
# type: (APIView, Any, *Any, **Any) -> Any
with capture_internal_exceptions():
request._request._sentry_drf_request_backref = weakref.ref(
request
)
pass
return old_drf_initial(self, request, *args, **kwargs)
APIView.initial = sentry_patched_drf_initial
def _patch_channels():
# type: () -> None
try:
from channels.http import AsgiHandler # type: ignore
except ImportError:
return
if not HAS_REAL_CONTEXTVARS:
# We better have contextvars or we're going to leak state between
# requests.
#
# We cannot hard-raise here because channels may not be used at all in
# the current process. That is the case when running traditional WSGI
# workers in gunicorn+gevent and the websocket stuff in a separate
# process.
logger.warning(
"We detected that you are using Django channels 2.0."
+ CONTEXTVARS_ERROR_MESSAGE
)
from sentry_sdk.integrations.django.asgi import patch_channels_asgi_handler_impl
patch_channels_asgi_handler_impl(AsgiHandler)
def _patch_django_asgi_handler():
# type: () -> None
try:
from django.core.handlers.asgi import ASGIHandler
except ImportError:
return
if not HAS_REAL_CONTEXTVARS:
# We better have contextvars or we're going to leak state between
# requests.
#
# We cannot hard-raise here because Django's ASGI stuff may not be used
# at all.
logger.warning(
"We detected that you are using Django 3." + CONTEXTVARS_ERROR_MESSAGE
)
from sentry_sdk.integrations.django.asgi import patch_django_asgi_handler_impl
patch_django_asgi_handler_impl(ASGIHandler)
def _set_transaction_name_and_source(scope, transaction_style, request):
# type: (sentry_sdk.Scope, str, WSGIRequest) -> None
try:
transaction_name = None
if transaction_style == "function_name":
fn = resolve(request.path).func
transaction_name = transaction_from_function(getattr(fn, "view_class", fn))
elif transaction_style == "url":
if hasattr(request, "urlconf"):
transaction_name = LEGACY_RESOLVER.resolve(
request.path_info, urlconf=request.urlconf
)
else:
transaction_name = LEGACY_RESOLVER.resolve(request.path_info)
if transaction_name is None:
transaction_name = request.path_info
source = TransactionSource.URL
else:
source = SOURCE_FOR_STYLE[transaction_style]
scope.set_transaction_name(
transaction_name,
source=source,
)
except Resolver404:
urlconf = import_module(settings.ROOT_URLCONF)
# This exception only gets thrown when transaction_style is `function_name`
# So we don't check here what style is configured
if hasattr(urlconf, "handler404"):
handler = urlconf.handler404
if isinstance(handler, str):
scope.transaction = handler
else:
scope.transaction = transaction_from_function(
getattr(handler, "view_class", handler)
)
except Exception:
pass
def _before_get_response(request):
# type: (WSGIRequest) -> None
integration = sentry_sdk.get_client().get_integration(DjangoIntegration)
if integration is None:
return
_patch_drf()
scope = sentry_sdk.get_current_scope()
# Rely on WSGI middleware to start a trace
_set_transaction_name_and_source(scope, integration.transaction_style, request)
scope.add_event_processor(
_make_wsgi_request_event_processor(weakref.ref(request), integration)
)
def _attempt_resolve_again(request, scope, transaction_style):
# type: (WSGIRequest, sentry_sdk.Scope, str) -> None
"""
Some django middlewares overwrite request.urlconf
so we need to respect that contract,
so we try to resolve the url again.
"""
if not hasattr(request, "urlconf"):
return
_set_transaction_name_and_source(scope, transaction_style, request)
def _after_get_response(request):
# type: (WSGIRequest) -> None
integration = sentry_sdk.get_client().get_integration(DjangoIntegration)
if integration is None or integration.transaction_style != "url":
return
scope = sentry_sdk.get_current_scope()
_attempt_resolve_again(request, scope, integration.transaction_style)
def _patch_get_response():
# type: () -> None
"""
patch get_response, because at that point we have the Django request object
"""
from django.core.handlers.base import BaseHandler
old_get_response = BaseHandler.get_response
def sentry_patched_get_response(self, request):
# type: (Any, WSGIRequest) -> Union[HttpResponse, BaseException]
_before_get_response(request)
rv = old_get_response(self, request)
_after_get_response(request)
return rv
BaseHandler.get_response = sentry_patched_get_response
if hasattr(BaseHandler, "get_response_async"):
from sentry_sdk.integrations.django.asgi import patch_get_response_async
patch_get_response_async(BaseHandler, _before_get_response)
def _make_wsgi_request_event_processor(weak_request, integration):
# type: (Callable[[], WSGIRequest], DjangoIntegration) -> EventProcessor
def wsgi_request_event_processor(event, hint):
# type: (Event, dict[str, Any]) -> Event
# if the request is gone we are fine not logging the data from
# it. This might happen if the processor is pushed away to
# another thread.
request = weak_request()
if request is None:
return event
django_3 = ASGIRequest is not None
if django_3 and type(request) == ASGIRequest:
# We have a `asgi_request_event_processor` for this.
return event
with capture_internal_exceptions():
DjangoRequestExtractor(request).extract_into_event(event)
if should_send_default_pii():
with capture_internal_exceptions():
_set_user_info(request, event)
return event
return wsgi_request_event_processor
def _got_request_exception(request=None, **kwargs):
# type: (WSGIRequest, **Any) -> None
client = sentry_sdk.get_client()
integration = client.get_integration(DjangoIntegration)
if integration is None:
return
if request is not None and integration.transaction_style == "url":
scope = sentry_sdk.get_current_scope()
_attempt_resolve_again(request, scope, integration.transaction_style)
event, hint = event_from_exception(
sys.exc_info(),
client_options=client.options,
mechanism={"type": "django", "handled": False},
)
sentry_sdk.capture_event(event, hint=hint)
| DjangoIntegration |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 239025,
"end": 240516
} | class ____(ExternKernel):
def codegen(self, wrapper: PythonWrapperCodegen) -> None:
wrapper.generate_extern_kernel_alloc(self)
def __init__(
self,
layout: OutputSpec,
inputs: Sequence[IRNode],
constant_args: Sequence[Any] = (),
kwargs: Optional[dict[str, Any]] = None,
python_kernel_name: Optional[str] = None,
cpp_kernel_name: Optional[str] = None,
ordered_kwargs_for_cpp_kernel: Sequence[Any] = (),
op_overload: Optional[_OpOverloads] = None,
) -> None:
unwrapped_inputs = self.unwrap_storage(inputs)
assert all(isinstance(i, IRNode) for i in unwrapped_inputs)
super().__init__(
None,
layout,
cast(Sequence[IRNode], unwrapped_inputs),
constant_args,
kwargs or {},
None,
python_kernel_name,
cpp_kernel_name,
ordered_kwargs_for_cpp_kernel,
op_overload,
)
# We need output buffers for generating kernel arguments in the
# abi-compatible mode, where we retrieve outputs by pass each individual
# output through the abi-compatible interface.
self.outputs: Sequence[Any] = []
self.name = V.graph.register_buffer(self)
V.graph.register_operation(self)
def should_allocate(self) -> bool:
return False
def apply_constraint(self) -> None:
raise NotImplementedError
| ExternKernelAlloc |
python | allegroai__clearml | clearml/backend_api/services/v2_9/queues.py | {
"start": 33852,
"end": 34719
} | class ____(Request):
"""
Gets queue information
:param queue: Queue ID
:type queue: str
"""
_service = "queues"
_action = "get_by_id"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {"queue": {"description": "Queue ID", "type": "string"}},
"required": ["queue"],
"type": "object",
}
def __init__(self, queue: str, **kwargs: Any) -> None:
super(GetByIdRequest, self).__init__(**kwargs)
self.queue = queue
@schema_property("queue")
def queue(self) -> str:
return self._property_queue
@queue.setter
def queue(self, value: str) -> None:
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
| GetByIdRequest |
python | ipython__ipython | tests/test_guarded_eval.py | {
"start": 7103,
"end": 7194
} | class ____:
def __call__(self) -> HeapType:
return HeapType()
| CallCreatesHeapType |
python | cython__cython | Cython/Compiler/UFuncs.py | {
"start": 1328,
"end": 10874
} | class ____:
def __init__(self, node):
self.node = node
self.global_scope = node.local_scope.global_scope()
self.injected_typename = "ufunc_typename"
while self.node.entry.cname.startswith(self.injected_typename):
self.injected_typename += "_"
self.injected_types = []
self.in_definitions = self.get_in_type_info()
self.out_definitions = self.get_out_type_info()
def _handle_typedef_type_constant(self, type_, macro_name):
decl = type_.empty_declaration_code()
substituted_cname = decl.strip().replace('_', '__').replace(' ', '_')
context = dict(
type_substituted_cname=substituted_cname,
macro_name=macro_name,
type_cname=decl,
)
self.global_scope.use_utility_code(
TempitaUtilityCode.load(
'UFuncTypedef',
'UFuncs_C.c',
context=context
))
return f"__Pyx_typedef_ufunc_{substituted_cname}"
def _get_type_constant(self, pos, type_):
base_type = type_
if base_type.is_typedef:
base_type = base_type.typedef_base_type
base_type = PyrexTypes.remove_cv_ref(base_type)
if base_type is PyrexTypes.c_bint_type:
# TODO - this would be nice but not obvious it works
error(pos, "Type '%s' cannot be used as a ufunc argument" % type_)
return
if type_.is_complex:
return self._handle_typedef_type_constant(
type_,
"__PYX_GET_NPY_COMPLEX_TYPE")
elif type_.is_int:
signed = ""
if type_.signed == PyrexTypes.SIGNED:
signed = "S"
elif type_.signed == PyrexTypes.UNSIGNED:
signed = "U"
return self._handle_typedef_type_constant(
type_,
f"__PYX_GET_NPY_{signed}INT_TYPE")
elif type_.is_float:
return self._handle_typedef_type_constant(
type_,
"__PYX_GET_NPY_FLOAT_TYPE")
elif type_.is_pyobject:
return "NPY_OBJECT"
# TODO possible NPY_BOOL to bint but it needs a cast?
# TODO NPY_DATETIME, NPY_TIMEDELTA, NPY_STRING, NPY_UNICODE and maybe NPY_VOID might be handleable
error(pos, "Type '%s' cannot be used as a ufunc argument" % type_)
def get_in_type_info(self):
definitions = []
for n, arg in enumerate(self.node.args):
injected_typename = f"{self.injected_typename}_in_{n}"
self.injected_types.append(injected_typename)
type_const = self._get_type_constant(self.node.pos, arg.type)
definitions.append(_ArgumentInfo(arg.type, type_const, injected_typename))
return definitions
def get_out_type_info(self):
if self.node.return_type.is_ctuple:
components = self.node.return_type.components
else:
components = [self.node.return_type]
definitions = []
for n, type in enumerate(components):
injected_typename = f"{self.injected_typename}_out_{n}"
self.injected_types.append(injected_typename)
type_const = self._get_type_constant(self.node.pos, type)
definitions.append(
_ArgumentInfo(type, type_const, injected_typename)
)
return definitions
def generate_cy_utility_code(self):
arg_types = [(a.injected_typename, a.type) for a in self.in_definitions]
out_types = [(a.injected_typename, a.type) for a in self.out_definitions]
context_types = dict(arg_types + out_types)
self.node.entry.used = True
ufunc_cname = self.global_scope.next_id(self.node.entry.name + "_ufunc_def")
will_be_called_without_gil = not (any(t.is_pyobject for _, t in arg_types) or
any(t.is_pyobject for _, t in out_types))
context = dict(
func_cname=ufunc_cname,
in_types=arg_types,
out_types=out_types,
inline_func_call=self.node.entry.cname,
nogil=self.node.entry.type.nogil,
will_be_called_without_gil=will_be_called_without_gil,
**context_types
)
ufunc_global_scope = Symtab.ModuleScope(
"ufunc_module", None, self.global_scope.context
)
ufunc_global_scope.declare_cfunction(
name=self.node.entry.cname,
cname=self.node.entry.cname,
type=self.node.entry.type,
pos=self.node.pos,
visibility="extern",
)
code = CythonUtilityCode.load(
"UFuncDefinition",
"UFuncs.pyx",
context=context,
from_scope = ufunc_global_scope,
#outer_module_scope=ufunc_global_scope,
)
tree = code.get_tree(entries_only=True)
return tree
def use_generic_utility_code(self):
# use the invariant C utility code
self.global_scope.use_utility_code(
UtilityCode.load_cached("UFuncsInit", "UFuncs_C.c")
)
self.global_scope.use_utility_code(
UtilityCode.load_cached("UFuncTypeHandling", "UFuncs_C.c")
)
self.global_scope.use_utility_code(
UtilityCode.load_cached("NumpyImportUFunc", "NumpyImportArray.c")
)
def convert_to_ufunc(node):
if isinstance(node, Nodes.CFuncDefNode):
if node.local_scope.parent_scope.is_c_class_scope:
error(node.pos, "Methods cannot currently be converted to a ufunc")
return node
converters = [UFuncConversion(node)]
original_node = node
elif isinstance(node, FusedNode.FusedCFuncDefNode) and isinstance(
node.node, Nodes.CFuncDefNode
):
if node.node.local_scope.parent_scope.is_c_class_scope:
error(node.pos, "Methods cannot currently be converted to a ufunc")
return node
converters = [UFuncConversion(n) for n in node.nodes]
original_node = node.node
else:
error(node.pos, "Only C functions can be converted to a ufunc")
return node
if not converters:
return # this path probably shouldn't happen
del converters[0].global_scope.entries[original_node.entry.name]
# the generic utility code is generic, so there's no reason to do it multiple times
converters[0].use_generic_utility_code()
return [node] + _generate_stats_from_converters(converters, original_node)
def generate_ufunc_initialization(converters, cfunc_nodes, original_node):
global_scope = converters[0].global_scope
ufunc_funcs_name = global_scope.next_id(Naming.pyrex_prefix + "funcs")
ufunc_types_name = global_scope.next_id(Naming.pyrex_prefix + "types")
ufunc_data_name = global_scope.next_id(Naming.pyrex_prefix + "data")
type_constants = []
narg_in = None
narg_out = None
for c in converters:
in_const = [d.type_constant for d in c.in_definitions]
if narg_in is not None:
assert narg_in == len(in_const)
else:
narg_in = len(in_const)
type_constants.extend(in_const)
out_const = [d.type_constant for d in c.out_definitions]
if narg_out is not None:
assert narg_out == len(out_const)
else:
narg_out = len(out_const)
type_constants.extend(out_const)
func_cnames = [cfnode.entry.cname for cfnode in cfunc_nodes]
context = dict(
ufunc_funcs_name=ufunc_funcs_name,
func_cnames=func_cnames,
ufunc_types_name=ufunc_types_name,
type_constants=type_constants,
ufunc_data_name=ufunc_data_name,
)
global_scope.use_utility_code(
TempitaUtilityCode.load("UFuncConsts", "UFuncs_C.c", context=context)
)
pos = original_node.pos
func_name = original_node.entry.name
docstr = original_node.doc
args_to_func = '%s(), %s, %s(), %s, %s, %s, PyUFunc_None, "%s", %s, 0' % (
ufunc_funcs_name,
ufunc_data_name,
ufunc_types_name,
len(func_cnames),
narg_in,
narg_out,
func_name,
docstr.as_c_string_literal() if docstr else "NULL",
)
call_node = ExprNodes.PythonCapiCallNode(
pos,
function_name="PyUFunc_FromFuncAndData",
# use a dummy type because it's honestly too fiddly
func_type=PyrexTypes.CFuncType(
PyrexTypes.py_object_type,
[PyrexTypes.CFuncTypeArg("dummy", PyrexTypes.c_void_ptr_type, None)],
),
args=[
ExprNodes.ConstNode(
pos, type=PyrexTypes.c_void_ptr_type, value=args_to_func
)
],
)
lhs_entry = global_scope.declare_var(func_name, PyrexTypes.py_object_type, pos)
assgn_node = Nodes.SingleAssignmentNode(
pos,
lhs=ExprNodes.NameNode(
pos, name=func_name, type=PyrexTypes.py_object_type, entry=lhs_entry
),
rhs=call_node,
)
return assgn_node
def _generate_stats_from_converters(converters, node):
    """Return the statement nodes implementing the ufunc.

    Produces one generated C function node per converter, followed by the
    initialization assignment that wires them all into a single ufunc
    object named after *node*.
    """
    stats = []
    for conv in converters:
        tree = conv.generate_cy_utility_code()
        cfunc = get_cfunc_from_tree(tree)
        # Propagate any utility code registered while the tree was built.
        conv.global_scope.utility_code_list.extend(tree.scope.utility_code_list)
        stats.append(cfunc)
    stats.append(generate_ufunc_initialization(converters, stats, node))
    return stats
| UFuncConversion |
python | realpython__materials | primer-on-python-decorators/class_decorators.py | {
"start": 38,
"end": 287
} | class ____:
@debug
def __init__(self, max_num):
self.max_num = max_num
@timer
def waste_time(self, num_times):
for _ in range(num_times):
sum([number**2 for number in range(self.max_num)])
@timer
| TimeWaster |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 870,
"end": 941
} | class ____(Model2B):
field3 = models.CharField(max_length=30)
| Model2C |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 47720,
"end": 48130
} | class ____(Elemwise):
_projection_passthrough = True
_parameters = ["frame", "freq", "how"]
_defaults = {"freq": None, "how": "start"}
operation = M.to_timestamp
_filter_passthrough = True
_preserves_partitioning_information = True
def _divisions(self):
return tuple(
pd.Index(self.frame.divisions).to_timestamp(freq=self.freq, how=self.how)
)
| ToTimestamp |
python | huggingface__transformers | src/transformers/models/modernbert_decoder/modular_modernbert_decoder.py | {
"start": 19605,
"end": 19697
} | class ____(ModernBertPredictionHead):
pass
@auto_docstring
| ModernBertDecoderPredictionHead |
python | getsentry__sentry | src/sentry/consumers/validate_schema.py | {
"start": 286,
"end": 2777
} | class ____(ProcessingStrategy[KafkaPayload]):
"""
Since ValidateSchema is currently a separate step to the main message
processing function, messages that are validated will be decoded twice. As a result,
we don't validate a large number of messages outside of dev and test environments.
If enforce_schema=True is passed, every message that fails validation will
raise an error and crash the consumer. This is designed for use in dev and test
environments. Otherwise, we rate limit message validation to once per second and log
warnings.
"""
def __init__(
self, topic: str, enforce_schema: bool, next_step: ProcessingStrategy[KafkaPayload]
) -> None:
self.__topic = topic
self.__enforce_schema = enforce_schema
self.__next_step = next_step
self.__last_record_time: float | None = None
self.__codec: sentry_kafka_schemas.codecs.Codec[Any] | None
try:
self.__codec = sentry_kafka_schemas.get_codec(topic)
except sentry_kafka_schemas.SchemaNotFound:
self.__codec = None
def submit(self, message: Message[KafkaPayload]) -> None:
if self.__enforce_schema:
if self.__codec is not None:
# This will raise an exception if the message is invalid
self.__codec.decode(message.payload.value, validate=True)
else:
now = time.time()
if self.__last_record_time is None or self.__last_record_time + 1.0 < now:
with sentry_sdk.isolation_scope() as scope:
scope.add_attachment(bytes=message.payload.value, filename="message.txt")
scope.set_tag("topic", self.__topic)
if self.__codec is None:
logger.warning("No validator configured for topic")
else:
try:
self.__codec.decode(message.payload.value)
except sentry_kafka_schemas.codecs.ValidationError:
logger.warning("Invalid message received")
self.__last_record_time = now
self.__next_step.submit(message)
def poll(self) -> None:
self.__next_step.poll()
def join(self, timeout: float | None = None) -> None:
self.__next_step.join(timeout)
def close(self) -> None:
self.__next_step.close()
def terminate(self) -> None:
self.__next_step.terminate()
| ValidateSchema |
python | django__django | tests/modeladmin/test_checks.py | {
"start": 37974,
"end": 38479
} | class ____(CheckTestCase):
def test_not_boolean(self):
class TestModelAdmin(ModelAdmin):
save_on_top = 1
self.assertIsInvalid(
TestModelAdmin,
ValidationTestModel,
"The value of 'save_on_top' must be a boolean.",
"admin.E102",
)
def test_valid_case(self):
class TestModelAdmin(ModelAdmin):
save_on_top = True
self.assertIsValid(TestModelAdmin, ValidationTestModel)
| SaveOnTopCheckTests |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/orcid/tests.py | {
"start": 238,
"end": 11682
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = OrcidProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""
{
"orcid-identifier": {
"uri": "https://sandbox.orcid.org/0000-0001-6796-198X",
"path": "0000-0001-6796-198X",
"host": "sandbox.orcid.org"
},
"preferences": {
"locale": "EN"
},
"history": {
"creation-method": "MEMBER_REFERRED",
"completion-date": null,
"submission-date": {
"value": 1456951327337
},
"last-modified-date": {
"value": 1519493486728
},
"claimed": true,
"source": null,
"deactivation-date": null,
"verified-email": true,
"verified-primary-email": true
},
"person": {
"last-modified-date": {
"value": 1519493469738
},
"name": {
"created-date": {
"value": 1460669254582
},
"last-modified-date": {
"value": 1460669254582
},
"given-names": {
"value": "Patricia"
},
"family-name": {
"value": "Lawrence"
},
"credit-name": null,
"source": null,
"visibility": "PUBLIC",
"path": "0000-0001-6796-198X"
},
"other-names": {
"last-modified-date": null,
"other-name": [],
"path": "/0000-0001-6796-198X/other-names"
},
"biography": {
"created-date": {
"value": 1460669254583
},
"last-modified-date": {
"value": 1460669254583
},
"content": null,
"visibility": "PUBLIC",
"path": "/0000-0001-6796-198X/biography"
},
"researcher-urls": {
"last-modified-date": null,
"researcher-url": [],
"path": "/0000-0001-6796-198X/researcher-urls"
},
"emails": {
"last-modified-date": {
"value": 1519493469738
},
"email": [
{
"created-date": {
"value": 1456951327661
},
"last-modified-date": {
"value": 1519493469738
},
"source": {
"source-orcid": {
"uri": "https://sandbox.orcid.org/0000-0001-6796-198X",
"path": "0000-0001-6796-198X",
"host": "sandbox.orcid.org"
},
"source-client-id": null,
"source-name": {
"value": "Patricia Lawrence"
}
},
"email": "lawrencepatricia@mailinator.com",
"path": null,
"visibility": "PUBLIC",
"verified": true,
"primary": true,
"put-code": null
}
],
"path": "/0000-0001-6796-198X/email"
},
"addresses": {
"last-modified-date": null,
"address": [],
"path": "/0000-0001-6796-198X/address"
},
"keywords": {
"last-modified-date": null,
"keyword": [],
"path": "/0000-0001-6796-198X/keywords"
},
"external-identifiers": {
"last-modified-date": null,
"external-identifier": [],
"path": "/0000-0001-6796-198X/external-identifiers"
},
"path": "/0000-0001-6796-198X/person"
},
"activities-summary": {
"last-modified-date": {
"value": 1513777479628
},
"educations": {
"last-modified-date": {
"value": 1459957293365
},
"education-summary": [
{
"created-date": {
"value": 1459957293365
},
"last-modified-date": {
"value": 1459957293365
},
"source": {
"source-orcid": {
"uri": "https://sandbox.orcid.org/0000-0001-6796-198X",
"path": "0000-0001-6796-198X",
"host": "sandbox.orcid.org"
},
"source-client-id": null,
"source-name": {
"value": "Patricia Lawrence"
}
},
"department-name": null,
"role-title": null,
"start-date": null,
"end-date": null,
"organization": {
"name": "Polytech'Rambouillet",
"address": {
"city": "Rambouillet",
"region": null,
"country": "FR"
},
"disambiguated-organization": null
},
"visibility": "PUBLIC",
"put-code": 19996,
"path": "/0000-0001-6796-198X/education/19996"
}
],
"path": "/0000-0001-6796-198X/educations"
},
"employments": {
"last-modified-date": {
"value": 1513777479628
},
"employment-summary": [
{
"created-date": {
"value": 1510399314937
},
"last-modified-date": {
"value": 1513777479628
},
"source": {
"source-orcid": {
"uri": "https://sandbox.orcid.org/0000-0001-6796-198X",
"path": "0000-0001-6796-198X",
"host": "sandbox.orcid.org"
},
"source-client-id": null,
"source-name": {
"value": "Patricia Lawrence"
}
},
"department-name": null,
"role-title": null,
"start-date": {
"year": {
"value": "2015"
},
"month": {
"value": "03"
},
"day": {
"value": "02"
}
},
"end-date": null,
"organization": {
"name": "École nationale supérieure de céramique industrielle",
"address": {
"city": "Limoges",
"region": null,
"country": "FR"
},
"disambiguated-organization": {
"disambiguated-organization-identifier": "105362",
"disambiguation-source": "RINGGOLD"
}
},
"visibility": "PUBLIC",
"put-code": 29138,
"path": "/0000-0001-6796-198X/employment/29138"
},
{
"created-date": {
"value": 1502366640610
},
"last-modified-date": {
"value": 1513777467282
},
"source": {
"source-orcid": {
"uri": "https://sandbox.orcid.org/0000-0001-6796-198X",
"path": "0000-0001-6796-198X",
"host": "sandbox.orcid.org"
},
"source-client-id": null,
"source-name": {
"value": "Patricia Lawrence"
}
},
"department-name": null,
"role-title": null,
"start-date": {
"year": {
"value": "2002"
},
"month": {
"value": "02"
},
"day": {
"value": "16"
}
},
"end-date": {
"year": {
"value": "2015"
},
"month": {
"value": "08"
},
"day": {
"value": "12"
}
},
"organization": {
"name": "University of Cambridge",
"address": {
"city": "Cambridge",
"region": "Cambridgeshire",
"country": "GB"
},
"disambiguated-organization": {
"disambiguated-organization-identifier": "2152",
"disambiguation-source": "RINGGOLD"
}
},
"visibility": "PUBLIC",
"put-code": 27562,
"path": "/0000-0001-6796-198X/employment/27562"
}
],
"path": "/0000-0001-6796-198X/employments"
},
"fundings": {
"last-modified-date": null,
"group": [],
"path": "/0000-0001-6796-198X/fundings"
},
"peer-reviews": {
"last-modified-date": null,
"group": [],
"path": "/0000-0001-6796-198X/peer-reviews"
},
"works": {
"last-modified-date": {
"value": 1459957753077
},
"group": [
{
"last-modified-date": {
"value": 1459957753077
},
"external-ids": {
"external-id": []
},
"work-summary": [
{
"put-code": 583440,
"created-date": {
"value": 1459957753047
},
"last-modified-date": {
"value": 1459957753077
},
"source": {
"source-orcid": {
"uri": "https://sandbox.orcid.org/0000-0001-6796-198X",
"path": "0000-0001-6796-198X",
"host": "sandbox.orcid.org"
},
"source-client-id": null,
"source-name": {
"value": "Patricia Lawrence"
}
},
"title": {
"title": {
"value": "Standard & Poor's fiscal methodology reviewed"
},
"subtitle": null,
"translated-title": null
},
"external-ids": {
"external-id": []
},
"type": "JOURNAL_ARTICLE",
"publication-date": {
"year": {
"value": "2001"
},
"month": {
"value": "07"
},
"day": {
"value": "14"
},
"media-type": null
},
"visibility": "PUBLIC",
"path": "/0000-0001-6796-198X/work/583440",
"display-index": "0"
}
]
}
],
"path": "/0000-0001-6796-198X/works"
},
"path": "/0000-0001-6796-198X/activities"
},
"path": "/0000-0001-6796-198X"
}
""",
)
def get_expected_to_str(self):
return "Orcid.org"
def get_login_response_json(self, with_refresh_token=True):
# TODO: This is not an actual response. I added this in order
# to get the test suite going but did not verify to check the
# exact response being returned.
return """
{
"access_token": "testac",
"expires_in": 631138026,
"token_type": "bearer",
"orcid": "0000-0001-6796-198X",
"scope": "/orcid-profile/read-limited",
"refresh_token": "testrf"
}"""
| OrcidTests |
python | scipy__scipy | scipy/signal/tests/test_windows.py | {
"start": 19558,
"end": 20260
} | class ____:
def test_basic(self, xp):
xp_assert_close(windows.general_hamming(5, 0.7, xp=xp),
xp.asarray([0.4, 0.7, 1.0, 0.7, 0.4], dtype=xp.float64))
xp_assert_close(windows.general_hamming(5, 0.75, sym=False, xp=xp),
xp.asarray([0.5, 0.6727457514, 0.9522542486,
0.9522542486, 0.6727457514], dtype=xp.float64))
xp_assert_close(windows.general_hamming(6, 0.75, sym=True, xp=xp),
xp.asarray([0.5, 0.6727457514, 0.9522542486,
0.9522542486, 0.6727457514, 0.5], dtype=xp.float64))
@make_xp_test_case(windows.hamming)
| TestGeneralHamming |
python | sphinx-doc__sphinx | sphinx/domains/python/_object.py | {
"start": 5019,
"end": 18720
} | class ____(ObjectDescription[tuple[str, str]]):
"""Description of a general Python object.
:cvar allow_nesting: Class is an object that allows for nested namespaces
:vartype allow_nesting: bool
"""
option_spec: ClassVar[OptionSpec] = {
'no-index': directives.flag,
'no-index-entry': directives.flag,
'no-contents-entry': directives.flag,
'no-typesetting': directives.flag,
'noindex': directives.flag,
'noindexentry': directives.flag,
'nocontentsentry': directives.flag,
'single-line-parameter-list': directives.flag,
'single-line-type-parameter-list': directives.flag,
'module': directives.unchanged,
'canonical': directives.unchanged,
'annotation': directives.unchanged,
}
doc_field_types = [
PyTypedField(
'parameter',
label=_('Parameters'),
names=(
'param',
'parameter',
'arg',
'argument',
'keyword',
'kwarg',
'kwparam',
),
typerolename='class',
typenames=('paramtype', 'type'),
can_collapse=True,
),
PyTypedField(
'variable',
label=_('Variables'),
names=('var', 'ivar', 'cvar'),
typerolename='class',
typenames=('vartype',),
can_collapse=True,
),
PyGroupedField(
'exceptions',
label=_('Raises'),
rolename='exc',
names=('raises', 'raise', 'exception', 'except'),
can_collapse=True,
),
Field(
'returnvalue',
label=_('Returns'),
has_arg=False,
names=('returns', 'return'),
),
PyField(
'returntype',
label=_('Return type'),
has_arg=False,
names=('rtype',),
bodyrolename='class',
),
]
allow_nesting = False
def get_signature_prefix(self, sig: str) -> Sequence[nodes.Node]:
"""May return a prefix to put before the object name in the
signature.
"""
return []
def needs_arglist(self) -> bool:
"""May return true if an empty argument list is to be generated even if
the document contains none.
"""
return False
def handle_signature(self, sig: str, signode: desc_signature) -> tuple[str, str]:
"""Transform a Python signature into RST nodes.
Return (fully qualified name of the thing, classname if any).
If inside a class, the current class name is handled intelligently:
* it is stripped from the displayed name if present
* it is added to the full name (return value) if not present
"""
m = py_sig_re.match(sig)
if m is None:
raise ValueError
prefix, name, tp_list, arglist, retann = m.groups()
# determine module and class name (if applicable), as well as full name
modname = self.options.get('module', self.env.ref_context.get('py:module'))
classname = self.env.ref_context.get('py:class')
if classname:
add_module = False
if prefix and (prefix == classname or prefix.startswith(f'{classname}.')):
fullname = prefix + name
# class name is given again in the signature
prefix = prefix[len(classname) :].lstrip('.')
elif prefix:
# class name is given in the signature, but different
# (shouldn't happen)
fullname = f'{classname}.{prefix}{name}'
else:
# class name is not given in the signature
fullname = f'{classname}.{name}'
else:
add_module = True
if prefix:
classname = prefix.rstrip('.')
fullname = prefix + name
else:
classname = ''
fullname = name
signode['module'] = modname
signode['class'] = classname
signode['fullname'] = fullname
max_len = (
self.config.python_maximum_signature_line_length
or self.config.maximum_signature_line_length
or 0
)
# determine if the function arguments (without its type parameters)
# should be formatted on a multiline or not by removing the width of
# the type parameters list (if any)
sig_len = len(sig)
tp_list_span = m.span(3)
multi_line_parameter_list = (
'single-line-parameter-list' not in self.options
and (sig_len - (tp_list_span[1] - tp_list_span[0])) > max_len > 0
)
# determine whether the type parameter list must be wrapped or not
arglist_span = m.span(4)
multi_line_type_parameter_list = (
'single-line-type-parameter-list' not in self.options
and (sig_len - (arglist_span[1] - arglist_span[0])) > max_len > 0
)
trailing_comma = self.env.config.python_trailing_comma_in_multi_line_signatures
sig_prefix = self.get_signature_prefix(sig)
if sig_prefix:
if type(sig_prefix) is str:
msg = (
'Python directive method get_signature_prefix()'
' must return a list of nodes.'
f" Return value was '{sig_prefix}'."
)
raise TypeError(msg)
signode += addnodes.desc_annotation(str(sig_prefix), '', *sig_prefix)
if prefix:
signode += addnodes.desc_addname(prefix, prefix)
elif modname and add_module and self.config.add_module_names:
nodetext = f'{modname}.'
signode += addnodes.desc_addname(nodetext, nodetext)
signode += addnodes.desc_name(name, name)
if tp_list:
try:
signode += _parse_type_list(
tp_list,
self.env,
multi_line_type_parameter_list,
trailing_comma,
)
except Exception as exc:
logger.warning(
'could not parse tp_list (%r): %s', tp_list, exc, location=signode
)
if arglist:
try:
signode += _parse_arglist(
arglist,
self.env,
multi_line_parameter_list,
trailing_comma,
)
except SyntaxError as exc:
# fallback to parse arglist original parser
# (this may happen if the argument list is incorrectly used
# as a list of bases when documenting a class)
# it supports to represent optional arguments (ex. "func(foo [, bar])")
logger.debug(
'syntax error in arglist (%r): %s', arglist, exc, location=signode
)
_pseudo_parse_arglist(
signode,
arglist,
multi_line_parameter_list=multi_line_parameter_list,
trailing_comma=trailing_comma,
env=self.env,
)
except (NotImplementedError, ValueError) as exc:
# duplicated parameter names raise ValueError and not a SyntaxError
logger.warning(
'could not parse arglist (%r): %s', arglist, exc, location=signode
)
_pseudo_parse_arglist(
signode,
arglist,
multi_line_parameter_list=multi_line_parameter_list,
trailing_comma=trailing_comma,
env=self.env,
)
else:
if self.needs_arglist():
# for callables, add an empty parameter list
signode += addnodes.desc_parameterlist()
if retann:
children = _parse_annotation(retann, self.env)
signode += addnodes.desc_returns(retann, '', *children)
anno = self.options.get('annotation')
if anno:
signode += addnodes.desc_annotation(
f' {anno}', '', addnodes.desc_sig_space(), nodes.Text(anno)
)
return fullname, prefix
def _object_hierarchy_parts(self, sig_node: desc_signature) -> tuple[str, ...]:
if 'fullname' not in sig_node:
return ()
modname = sig_node.get('module')
fullname = sig_node['fullname']
if modname:
return (modname, *fullname.split('.'))
else:
return tuple(fullname.split('.'))
def get_index_text(self, modname: str, name: tuple[str, str]) -> str:
"""Return the text for the index entry of the object."""
msg = 'must be implemented in subclasses'
raise NotImplementedError(msg)
def add_target_and_index(
self, name_cls: tuple[str, str], sig: str, signode: desc_signature
) -> None:
mod_name = self.options.get('module', self.env.ref_context.get('py:module'))
fullname = (f'{mod_name}.' if mod_name else '') + name_cls[0]
node_id = make_id(self.env, self.state.document, '', fullname)
signode['ids'].append(node_id)
self.state.document.note_explicit_target(signode)
domain = self.env.domains.python_domain
domain.note_object(fullname, self.objtype, node_id, location=signode)
if self.objtype != 'type':
# py:type directive uses `canonical` option for a different meaning
canonical_name = self.options.get('canonical')
if canonical_name:
domain.note_object(
canonical_name,
self.objtype,
node_id,
aliased=True,
location=signode,
)
if 'no-index-entry' not in self.options:
if index_text := self.get_index_text(mod_name, name_cls): # type: ignore[arg-type]
self.indexnode['entries'].append((
'single',
index_text,
node_id,
'',
None,
))
def before_content(self) -> None:
"""Handle object nesting before content
:py:class:`PyObject` represents Python language constructs. For
constructs that are nestable, such as a Python classes, this method will
build up a stack of the nesting hierarchy so that it can be later
de-nested correctly, in :py:meth:`after_content`.
For constructs that aren't nestable, the stack is bypassed, and instead
only the most recent object is tracked. This object prefix name will be
removed with :py:meth:`after_content`.
"""
prefix = None
if self.names:
# fullname and name_prefix come from the `handle_signature` method.
# fullname represents the full object name that is constructed using
# object nesting and explicit prefixes. `name_prefix` is the
# explicit prefix given in a signature
(fullname, name_prefix) = self.names[-1]
if self.allow_nesting:
prefix = fullname
elif name_prefix:
prefix = name_prefix.strip('.')
if prefix:
self.env.ref_context['py:class'] = prefix
if self.allow_nesting:
classes = self.env.ref_context.setdefault('py:classes', [])
classes.append(prefix)
if 'module' in self.options:
modules = self.env.ref_context.setdefault('py:modules', [])
modules.append(self.env.ref_context.get('py:module'))
self.env.ref_context['py:module'] = self.options['module']
def after_content(self) -> None:
"""Handle object de-nesting after content
If this class is a nestable object, removing the last nested class prefix
ends further nesting in the object.
If this class is not a nestable object, the list of classes should not
be altered as we didn't affect the nesting levels in
:py:meth:`before_content`.
"""
classes = self.env.ref_context.setdefault('py:classes', [])
if self.allow_nesting:
with contextlib.suppress(IndexError):
classes.pop()
self.env.ref_context['py:class'] = classes[-1] if len(classes) > 0 else None
if 'module' in self.options:
modules = self.env.ref_context.setdefault('py:modules', [])
if modules:
self.env.ref_context['py:module'] = modules.pop()
else:
self.env.ref_context.pop('py:module')
def _toc_entry_name(self, sig_node: desc_signature) -> str:
if not sig_node.get('_toc_parts'):
return ''
config = self.config
objtype = sig_node.parent.get('objtype')
if config.add_function_parentheses and objtype in {'function', 'method'}:
parens = '()'
else:
parens = ''
*parents, name = sig_node['_toc_parts']
if config.toc_object_entries_show_parents == 'domain':
return sig_node.get('fullname', name) + parens
if config.toc_object_entries_show_parents == 'hide':
return name + parens
if config.toc_object_entries_show_parents == 'all':
return '.'.join([*parents, name + parens])
return ''
| PyObject |
python | huggingface__transformers | src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py | {
"start": 94948,
"end": 101101
} | class ____(BigBirdPegasusPreTrainedModel):
_tied_weights_keys = {
"encoder.embed_tokens.weight": "shared.weight",
"decoder.embed_tokens.weight": "shared.weight",
}
def __init__(self, config: BigBirdPegasusConfig):
super().__init__(config)
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
self.shared = BigBirdPegasusScaledWordEmbedding(
vocab_size, config.d_model, padding_idx, embed_scale=embed_scale
)
self.encoder = BigBirdPegasusEncoder(config)
self.decoder = BigBirdPegasusDecoder(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[list[torch.FloatTensor]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[tuple, Seq2SeqModelOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Provide for translation and summarization training. By default, the model will create this tensor by
shifting the `input_ids` to the right, following the paper.
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read
[`modeling_bigbird_pegasus._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in
[the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy.
"""
# different to other models, BigBirdPegasus automatically creates decoder_input_ids from
# input_ids if no decoder_input_ids are provided
if decoder_input_ids is None and decoder_inputs_embeds is None:
if input_ids is None:
raise ValueError(
"If no `decoder_input_ids` or `decoder_inputs_embeds` are "
"passed, `input_ids` cannot be `None`. Please pass either "
"`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`."
)
decoder_input_ids = shift_tokens_right(
input_ids, self.config.pad_token_id, self.config.decoder_start_token_id
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
# decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@auto_docstring(
custom_intro="""
The BigBirdPegasus Model with a language modeling head. Can be used for summarization.
"""
)
| BigBirdPegasusModel |
python | kamyu104__LeetCode-Solutions | Python/to-lower-case.py | {
"start": 29,
"end": 273
} | class ____(object):
def toLowerCase(self, str):
"""
:type str: str
:rtype: str
"""
return "".join([chr(ord('a')+ord(c)-ord('A'))
if 'A' <= c <= 'Z' else c for c in str])
| Solution |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/components/shell-script-component/with-build-defs.py | {
"start": 149,
"end": 884
} | class ____(dg.Component, dg.Resolvable):
"""Models a shell script as a Dagster asset."""
script_path: str
asset_specs: Sequence[dg.ResolvedAssetSpec]
def build_defs(self, context: dg.ComponentLoadContext) -> dg.Definitions:
resolved_script_path = Path(context.path, self.script_path).absolute()
@dg.multi_asset(name=Path(self.script_path).stem, specs=self.asset_specs)
def _asset(context: dg.AssetExecutionContext):
self.execute(resolved_script_path, context)
return dg.Definitions(assets=[_asset])
def execute(self, resolved_script_path: Path, context: dg.AssetExecutionContext):
return subprocess.run(["sh", str(resolved_script_path)], check=True)
| ShellCommand |
python | PyCQA__pylint | tests/functional/u/useless/useless_parent_delegation.py | {
"start": 12244,
"end": 12417
} | class ____(MyList):
def __eq__(self, other):
return super().__eq__(other) and len(self) > 0
def __hash__(self):
return super().__hash__()
| ExtendedList |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py | {
"start": 4866,
"end": 5048
} | class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneMessageEvent, GrapheneStepEvent)
name = "ExecutionStepStartEvent"
| GrapheneExecutionStepStartEvent |
python | pyparsing__pyparsing | pyparsing/core.py | {
"start": 3834,
"end": 8177
} | class ____(Enum):
"""
Diagnostic configuration (all default to disabled)
- ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results
name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions
- ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results
name is defined on a containing expression with ungrouped subexpressions that also
have results names
- ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined
with a results name, but has no contents defined
- ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a :class:`Forward` is
defined in a grammar but has never had an expression attached to it
- ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined
but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'``
- ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is
incorrectly called with multiple str arguments
- ``enable_debug_on_named_expressions`` - flag to auto-enable debug on all subsequent
calls to :class:`ParserElement.set_name`
Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`.
All warnings can be enabled by calling :class:`enable_all_warnings`.
"""
warn_multiple_tokens_in_named_alternation = 0
warn_ungrouped_named_tokens_in_collection = 1
warn_name_set_on_empty_Forward = 2
warn_on_parse_using_empty_Forward = 3
warn_on_assignment_to_Forward = 4
warn_on_multiple_string_args_to_oneof = 5
warn_on_match_first_with_lshift_operator = 6
enable_debug_on_named_expressions = 7
def enable_diag(diag_enum: Diagnostics) -> None:
"""
Enable a global pyparsing diagnostic flag (see :class:`Diagnostics`).
"""
__diag__.enable(diag_enum.name)
def disable_diag(diag_enum: Diagnostics) -> None:
"""
Disable a global pyparsing diagnostic flag (see :class:`Diagnostics`).
"""
__diag__.disable(diag_enum.name)
def enable_all_warnings() -> None:
"""
Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`).
"""
__diag__.enable_all_warnings()
# hide abstract class
del __config_flags
def _should_enable_warnings(
cmd_line_warn_options: typing.Iterable[str], warn_env_var: typing.Optional[str]
) -> bool:
enable = bool(warn_env_var)
for warn_opt in cmd_line_warn_options:
w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split(
":"
)[:5]
if not w_action.lower().startswith("i") and (
not (w_message or w_category or w_module) or w_module == "pyparsing"
):
enable = True
elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""):
enable = False
return enable
if _should_enable_warnings(
sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS")
):
enable_all_warnings()
# build list of single arg builtins, that can be used as parse actions
# fmt: off
_single_arg_builtins = {
sum, len, sorted, reversed, list, tuple, set, any, all, min, max
}
# fmt: on
_generatorType = types.GeneratorType
ParseImplReturnType = tuple[int, Any]
PostParseReturnType = Union[ParseResults, Sequence[ParseResults]]
ParseCondition = Union[
Callable[[], bool],
Callable[[ParseResults], bool],
Callable[[int, ParseResults], bool],
Callable[[str, int, ParseResults], bool],
]
ParseFailAction = Callable[[str, int, "ParserElement", Exception], None]
DebugStartAction = Callable[[str, int, "ParserElement", bool], None]
DebugSuccessAction = Callable[
[str, int, int, "ParserElement", ParseResults, bool], None
]
DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None]
alphas: str = string.ascii_uppercase + string.ascii_lowercase
identchars: str = pyparsing_unicode.Latin1.identchars
identbodychars: str = pyparsing_unicode.Latin1.identbodychars
nums: str = "0123456789"
hexnums: str = nums + "ABCDEFabcdef"
alphanums: str = alphas + nums
printables: str = "".join([c for c in string.printable if c not in string.whitespace])
| Diagnostics |
python | ipython__ipython | IPython/core/magics/execution.py | {
"start": 5411,
"end": 63804
} | class ____(Magics):
"""Magics related to code execution, debugging, profiling, etc."""
    # Class-level dict shared by all instances (note: mutable class attribute).
    _transformers: Dict[str, Any] = {}

    def __init__(self, shell):
        """Initialize the execution magics, bound to the owning shell."""
        super(ExecutionMagics, self).__init__(shell)
        # Default execution function used to actually run user code.
        self.default_runner = None
    @skip_doctest
    @no_var_expand
    @line_cell_magic
    def prun(self, parameter_s='', cell=None):
        """Run a statement through the python code profiler.

        **Usage, in line mode**::

          %prun [options] statement

        **Usage, in cell mode**::

          %%prun [options] [statement]

          code...

          code...

        In cell mode, the additional code lines are appended to the (possibly
        empty) statement in the first line. Cell mode allows you to easily
        profile multiline blocks without having to put them in a separate
        function.

        The given statement (which doesn't require quote marks) is run via the
        python profiler in a manner similar to the profile.run() function.
        Namespaces are internally managed to work correctly; profile.run
        cannot be used in IPython because it makes certain assumptions about
        namespaces which do not hold under IPython.

        Options:

        -l <limit>
          you can place restrictions on what or how much of the
          profile gets printed. The limit value can be:

             * A string: only information for function names containing this string
               is printed.

             * An integer: only these many lines are printed.

             * A float (between 0 and 1): this fraction of the report is printed
               (for example, use a limit of 0.4 to see the topmost 40% only).

          You can combine several limits with repeated use of the option. For
          example, ``-l __init__ -l 5`` will print only the topmost 5 lines of
          information about class constructors.

        -r
          return the pstats.Stats object generated by the profiling. This
          object has all the information about the profile in it, and you can
          later use it for further analysis or in other functions.

        -s <key>
          sort profile by given key. You can provide more than one key
          by using the option several times: '-s key1 -s key2 -s key3...'. The
          default sorting key is 'time'.

          The following is copied verbatim from the profile documentation
          referenced below:

          When more than one key is provided, additional keys are used as
          secondary criteria when the there is equality in all keys selected
          before them.

          Abbreviations can be used for any key names, as long as the
          abbreviation is unambiguous.  The following are the keys currently
          defined:

          ============  =====================
          Valid Arg     Meaning
          ============  =====================
          "calls"       call count
          "cumulative"  cumulative time
          "file"        file name
          "module"      file name
          "pcalls"      primitive call count
          "line"        line number
          "name"        function name
          "nfl"         name/file/line
          "stdname"     standard name
          "time"        internal time
          ============  =====================

          Note that all sorts on statistics are in descending order (placing
          most time consuming items first), where as name, file, and line number
          searches are in ascending order (i.e., alphabetical). The subtle
          distinction between "nfl" and "stdname" is that the standard name is a
          sort of the name as printed, which means that the embedded line
          numbers get compared in an odd way.  For example, lines 3, 20, and 40
          would (if the file names were the same) appear in the string order
          "20" "3" and "40".  In contrast, "nfl" does a numeric compare of the
          line numbers.  In fact, sort_stats("nfl") is the same as
          sort_stats("name", "file", "line").

        -T <filename>
          save profile results as shown on screen to a text
          file. The profile is still shown on screen.

        -D <filename>
          save (via dump_stats) profile statistics to given
          filename. This data is in a format understood by the pstats module, and
          is generated by a call to the dump_stats() method of profile
          objects. The profile is still shown on screen.

        -q
          suppress output to the pager.  Best used with -T and/or -D above.

        If you want to run complete programs under the profiler's control, use
        ``%run -p [prof_opts] filename.py [args to program]`` where prof_opts
        contains profiler specific options as described here.

        You can read the complete documentation for the profile module with::

            In [1]: import profile; profile.help()

        .. versionchanged:: 7.3
            User variables are no longer expanded,
            the magic line is always left unmodified.
        """
        # TODO: port to magic_arguments as currently this is duplicated in IPCompleter._extract_code
        # posix=False keeps quoting/backslashes in the statement intact;
        # list_all=True so repeated flags (e.g. several -l limits) accumulate.
        opts, arg_str = self.parse_options(parameter_s, 'D:l:rs:T:q',
                                           list_all=True, posix=False)
        if cell is not None:
            # Cell mode: append the cell body to the first-line statement.
            arg_str += '\n' + cell
        # Turn IPython-specific syntax into plain Python before profiling.
        arg_str = self.shell.transform_cell(arg_str)
        return self._run_with_profiler(arg_str, opts, self.shell.user_ns)
def _run_with_profiler(self, code, opts, namespace):
"""
Run `code` with profiler. Used by ``%prun`` and ``%run -p``.
Parameters
----------
code : str
Code to be executed.
opts : Struct
Options parsed by `self.parse_options`.
namespace : dict
A dictionary for Python namespace (e.g., `self.shell.user_ns`).
"""
# Fill default values for unspecified options:
opts.merge(Struct(D=[''], l=[], s=['time'], T=['']))
prof = profile.Profile()
try:
prof = prof.runctx(code, namespace, namespace)
sys_exit = ''
except SystemExit:
sys_exit = """*** SystemExit exception caught in code being profiled."""
stats = pstats.Stats(prof).strip_dirs().sort_stats(*opts.s)
lims = opts.l
if lims:
lims = [] # rebuild lims with ints/floats/strings
for lim in opts.l:
try:
lims.append(int(lim))
except ValueError:
try:
lims.append(float(lim))
except ValueError:
lims.append(lim)
# Trap output.
stdout_trap = StringIO()
stats_stream = stats.stream
try:
stats.stream = stdout_trap
stats.print_stats(*lims)
finally:
stats.stream = stats_stream
output = stdout_trap.getvalue()
output = output.rstrip()
if 'q' not in opts:
page.page(output)
print(sys_exit, end=' ')
dump_file = opts.D[0]
text_file = opts.T[0]
if dump_file:
prof.dump_stats(dump_file)
print(
f"\n*** Profile stats marshalled to file {repr(dump_file)}.{sys_exit}"
)
if text_file:
pfile = Path(text_file)
pfile.touch(exist_ok=True)
pfile.write_text(output, encoding="utf-8")
print(
f"\n*** Profile printout saved to text file {repr(text_file)}.{sys_exit}"
)
if 'r' in opts:
return stats
return None
@line_magic
def pdb(self, parameter_s=''):
"""Control the automatic calling of the pdb interactive debugger.
Call as '%pdb on', '%pdb 1', '%pdb off' or '%pdb 0'. If called without
argument it works as a toggle.
When an exception is triggered, IPython can optionally call the
interactive pdb debugger after the traceback printout. %pdb toggles
this feature on and off.
The initial state of this feature is set in your configuration
file (the option is ``InteractiveShell.pdb``).
If you want to just activate the debugger AFTER an exception has fired,
without having to type '%pdb on' and rerunning your code, you can use
the %debug magic."""
par = parameter_s.strip().lower()
if par:
try:
new_pdb = {'off':0,'0':0,'on':1,'1':1}[par]
except KeyError:
print ('Incorrect argument. Use on/1, off/0, '
'or nothing for a toggle.')
return
else:
# toggle
new_pdb = not self.shell.call_pdb
# set on the shell
self.shell.call_pdb = new_pdb
print('Automatic pdb calling has been turned',on_off(new_pdb))
@magic_arguments.magic_arguments()
@magic_arguments.argument('--breakpoint', '-b', metavar='FILE:LINE',
help="""
Set break point at LINE in FILE.
"""
)
@magic_arguments.kwds(
epilog="""
Any remaining arguments will be treated as code to run in the debugger.
"""
)
@no_var_expand
@line_cell_magic
@needs_local_scope
def debug(self, line="", cell=None, local_ns=None):
"""Activate the interactive debugger.
This magic command support two ways of activating debugger.
One is to activate debugger before executing code. This way, you
can set a break point, to step through the code from the point.
You can use this mode by giving statements to execute and optionally
a breakpoint.
The other one is to activate debugger in post-mortem mode. You can
activate this mode simply running %debug without any argument.
If an exception has just occurred, this lets you inspect its stack
frames interactively. Note that this will always work only on the last
traceback that occurred, so you must call this quickly after an
exception that you wish to inspect has fired, because if another one
occurs, it clobbers the previous one.
If you want IPython to automatically do this on every exception, see
the %pdb magic for more details.
.. versionchanged:: 7.3
When running code, user variables are no longer expanded,
the magic line is always left unmodified.
"""
args, extra = magic_arguments.parse_argstring(self.debug, line, partial=True)
if not (args.breakpoint or extra or cell):
self._debug_post_mortem()
elif not (args.breakpoint or cell):
# If there is no breakpoints, the line is just code to execute
self._debug_exec(line, None, local_ns)
else:
# Here we try to reconstruct the code from the output of
# parse_argstring. This might not work if the code has spaces
# For example this fails for `print("a b")`
code = " ".join(extra)
if cell:
code += "\n" + cell
self._debug_exec(code, args.breakpoint, local_ns)
    def _debug_post_mortem(self):
        """Invoke ``self.shell.debugger(force=True)`` for post-mortem debugging
        of the last traceback (used by %debug with no arguments)."""
        self.shell.debugger(force=True)
def _debug_exec(self, code, breakpoint, local_ns=None):
if breakpoint:
(filename, bp_line) = breakpoint.rsplit(':', 1)
bp_line = int(bp_line)
else:
(filename, bp_line) = (None, None)
self._run_with_debugger(
code, self.shell.user_ns, filename, bp_line, local_ns=local_ns
)
@line_magic
def tb(self, s):
"""Print the last traceback.
Optionally, specify an exception reporting mode, tuning the
verbosity of the traceback. By default the currently-active exception
mode is used. See %xmode for changing exception reporting modes.
Valid modes: Plain, Context, Verbose, and Minimal.
"""
interactive_tb = self.shell.InteractiveTB
if s:
# Switch exception reporting mode for this one call.
# Ensure it is switched back.
def xmode_switch_err(name):
warn('Error changing %s exception modes.\n%s' %
(name,sys.exc_info()[1]))
new_mode = s.strip().capitalize()
original_mode = interactive_tb.mode
try:
try:
interactive_tb.set_mode(mode=new_mode)
except Exception:
xmode_switch_err('user')
else:
self.shell.showtraceback()
finally:
interactive_tb.set_mode(mode=original_mode)
else:
self.shell.showtraceback()
    @skip_doctest
    @line_magic
    def run(self, parameter_s='', runner=None,
            file_finder=get_py_filename):
        """Run the named file inside IPython as a program.

        Usage::

          %run [-n -i -e -G]
               [( -t [-N<N>] | -d [-b<N>] | -p [profile options] )]
               ( -m mod | filename ) [args]

        The filename argument should be either a pure Python script (with
        extension ``.py``), or a file with custom IPython syntax (such as
        magics). If the latter, the file can be either a script with ``.ipy``
        extension, or a Jupyter notebook with ``.ipynb`` extension. When running
        a Jupyter notebook, the output from print statements and other
        displayed objects will appear in the terminal (even matplotlib figures
        will open, if a terminal-compliant backend is being used). Note that,
        at the system command line, the ``jupyter run`` command offers similar
        functionality for executing notebooks (albeit currently with some
        differences in supported options).

        Parameters after the filename are passed as command-line arguments to
        the program (put in sys.argv). Then, control returns to IPython's
        prompt.

        This is similar to running at a system prompt ``python file args``,
        but with the advantage of giving you IPython's tracebacks, and of
        loading all variables into your interactive namespace for further use
        (unless -p is used, see below).

        The file is executed in a namespace initially consisting only of
        ``__name__=='__main__'`` and sys.argv constructed as indicated. It thus
        sees its environment as if it were being run as a stand-alone program
        (except for sharing global objects such as previously imported
        modules). But after execution, the IPython interactive namespace gets
        updated with all variables defined in the program (except for ``__name__``
        and ``sys.argv``). This allows for very convenient loading of code for
        interactive work, while giving each program a 'clean sheet' to run in.

        Arguments are expanded using shell-like glob match.  Patterns
        '*', '?', '[seq]' and '[!seq]' can be used.  Additionally,
        tilde '~' will be expanded into user's home directory.  Unlike
        real shells, quotation does not suppress expansions.  Use
        *two* back slashes (e.g. ``\\\\*``) to suppress expansions.
        To completely disable these expansions, you can use -G flag.

        On Windows systems, the use of single quotes `'` when specifying
        a file is not supported. Use double quotes `"`.

        Options:

        -n
          __name__ is NOT set to '__main__', but to the running file's name
          without extension (as python does under import).  This allows running
          scripts and reloading the definitions in them without calling code
          protected by an ``if __name__ == "__main__"`` clause.

        -i
          run the file in IPython's namespace instead of an empty one. This
          is useful if you are experimenting with code written in a text editor
          which depends on variables defined interactively.

        -e
          ignore sys.exit() calls or SystemExit exceptions in the script
          being run.  This is particularly useful if IPython is being used to
          run unittests, which always exit with a sys.exit() call.  In such
          cases you are interested in the output of the test results, not in
          seeing a traceback of the unittest module.

        -t
          print timing information at the end of the run.  IPython will give
          you an estimated CPU time consumption for your script, which under
          Unix uses the resource module to avoid the wraparound problems of
          time.clock().  Under Unix, an estimate of time spent on system tasks
          is also given (for Windows platforms this is reported as 0.0).

        If -t is given, an additional ``-N<N>`` option can be given, where <N>
        must be an integer indicating how many times you want the script to
        run.  The final timing report will include total and per run results.

        For example (testing the script myscript.py)::

            In [1]: run -t myscript

            IPython CPU timings (estimated):
              User  :    0.19597 s.
              System:        0.0 s.

            In [2]: run -t -N5 myscript

            IPython CPU timings (estimated):
            Total runs performed: 5
              Times :      Total       Per run
              User  :   0.910862 s,  0.1821724 s.
              System:        0.0 s,        0.0 s.

        -d
          run your program under the control of pdb, the Python debugger.
          This allows you to execute your program step by step, watch variables,
          etc.  Internally, what IPython does is similar to calling::

              pdb.run('execfile("YOURFILENAME")')

          with a breakpoint set on line 1 of your file.  You can change the line
          number for this automatic breakpoint to be <N> by using the -bN option
          (where N must be an integer). For example::

              %run -d -b40 myscript

          will set the first breakpoint at line 40 in myscript.py.  Note that
          the first breakpoint must be set on a line which actually does
          something (not a comment or docstring) for it to stop execution.

          Or you can specify a breakpoint in a different file::

              %run -d -b myotherfile.py:20 myscript

          When the pdb debugger starts, you will see a (Pdb) prompt.  You must
          first enter 'c' (without quotes) to start execution up to the first
          breakpoint.

          Entering 'help' gives information about the use of the debugger.  You
          can easily see pdb's full documentation with "import pdb;pdb.help()"
          at a prompt.

        -p
          run program under the control of the Python profiler module (which
          prints a detailed report of execution times, function calls, etc).

          You can pass other options after -p which affect the behavior of the
          profiler itself. See the docs for %prun for details.

          In this mode, the program's variables do NOT propagate back to the
          IPython interactive namespace (because they remain in the namespace
          where the profiler executes them).

          Internally this triggers a call to %prun, see its documentation for
          details on the options available specifically for profiling.

        There is one special usage for which the text above doesn't apply:
        if the filename ends with .ipy[nb], the file is run as ipython script,
        just as if the commands were written on IPython prompt.

        -m
          specify module name to load instead of script path. Similar to
          the -m option for the python interpreter. Use this option last if you
          want to combine with other %run options. Unlike the python interpreter
          only source modules are allowed no .pyc or .pyo files.
          For example::

              %run -m example

          will run the example module.

        -G
          disable shell-like glob expansion of arguments.

        """
        # Logic to handle issue #3664
        # Add '--' after '-m <module_name>' to ignore additional args passed to a module.
        if '-m' in parameter_s and '--' not in parameter_s:
            argv = shlex.split(parameter_s, posix=(os.name == 'posix'))
            for idx, arg in enumerate(argv):
                if arg and arg.startswith('-') and arg != '-':
                    if arg == '-m':
                        argv.insert(idx + 2, '--')
                        break
                else:
                    # Positional arg, break
                    break
            parameter_s = ' '.join(shlex.quote(arg) for arg in argv)

        # get arguments and set sys.argv for program to be run.
        opts, arg_lst = self.parse_options(parameter_s,
                                           'nidtN:b:pD:l:rs:T:em:G',
                                           mode='list', list_all=1)
        if "m" in opts:
            # -m: resolve the module to its source file and run that file.
            modulename = opts["m"][0]
            modpath = find_mod(modulename)
            if modpath is None:
                msg = '%r is not a valid modulename on sys.path'%modulename
                raise Exception(msg)
            arg_lst = [modpath] + arg_lst
        try:
            fpath = None # initialize to make sure fpath is in scope later
            fpath = arg_lst[0]
            filename = file_finder(fpath)
        except IndexError as e:
            msg = 'you must provide at least a filename.'
            raise Exception(msg) from e
        except IOError as e:
            try:
                msg = str(e)
            except UnicodeError:
                msg = e.message
            if os.name == 'nt' and re.match(r"^'.*'$",fpath):
                warn('For Windows, use double quotes to wrap a filename: %run "mypath\\myfile.py"')
            raise Exception(msg) from e
        except TypeError:
            if fpath in sys.meta_path:
                filename = ""
            else:
                raise

        if filename.lower().endswith(('.ipy', '.ipynb')):
            # IPython scripts / notebooks take a separate execution path.
            with preserve_keys(self.shell.user_ns, '__file__'):
                self.shell.user_ns['__file__'] = filename
                self.shell.safe_execfile_ipy(filename, raise_exceptions=True)
            return

        # Control the response to exit() calls made by the script being run
        exit_ignore = 'e' in opts

        # Make sure that the running script gets a proper sys.argv as if it
        # were run from a system shell.
        save_argv = sys.argv # save it for later restoring

        if 'G' in opts:
            args = arg_lst[1:]
        else:
            # tilde and glob expansion
            args = shellglob(map(os.path.expanduser, arg_lst[1:]))

        sys.argv = [filename] + args # put in the proper filename

        if 'n' in opts:
            # -n: emulate python's import-time __name__ (file stem).
            name = Path(filename).stem
        else:
            name = '__main__'

        if 'i' in opts:
            # Run in user's interactive namespace
            prog_ns = self.shell.user_ns
            __name__save = self.shell.user_ns['__name__']
            prog_ns['__name__'] = name
            main_mod = self.shell.user_module

            # Since '%run foo' emulates 'python foo.py' at the cmd line, we must
            # set the __file__ global in the script's namespace
            # TK: Is this necessary in interactive mode?
            prog_ns['__file__'] = filename
        else:
            # Run in a fresh, empty namespace

            # The shell MUST hold a reference to prog_ns so after %run
            # exits, the python deletion mechanism doesn't zero it out
            # (leaving dangling references). See interactiveshell for details
            main_mod = self.shell.new_main_mod(filename, name)
            prog_ns = main_mod.__dict__

        # pickle fix.  See interactiveshell for an explanation.  But we need to
        # make sure that, if we overwrite __main__, we replace it at the end
        main_mod_name = prog_ns['__name__']

        if main_mod_name == '__main__':
            restore_main = sys.modules['__main__']
        else:
            # False acts as a sentinel meaning "nothing to restore".
            restore_main = False

        # This needs to be undone at the end to prevent holding references to
        # every single object ever created.
        sys.modules[main_mod_name] = main_mod

        if 'p' in opts or 'd' in opts:
            # Profiler / debugger modes drive execution through a code string
            # evaluated in a minimal namespace (code_ns) built here.
            if 'm' in opts:
                code = 'run_module(modulename, prog_ns)'
                code_ns = {
                    'run_module': self.shell.safe_run_module,
                    'prog_ns': prog_ns,
                    'modulename': modulename,
                }
            else:
                if 'd' in opts:
                    # allow exceptions to raise in debug mode
                    code = 'execfile(filename, prog_ns, raise_exceptions=True)'
                else:
                    code = 'execfile(filename, prog_ns)'
                code_ns = {
                    'execfile': self.shell.safe_execfile,
                    'prog_ns': prog_ns,
                    'filename': get_py_filename(filename),
                }
        try:
            # stats stays None unless the profiler path (-p) produces one.
            stats = None
            if 'p' in opts:
                stats = self._run_with_profiler(code, opts, code_ns)
            else:
                if 'd' in opts:
                    bp_file, bp_line = parse_breakpoint(
                        opts.get('b', ['1'])[0], filename)
                    self._run_with_debugger(
                        code, code_ns, filename, bp_line, bp_file)
                else:
                    if 'm' in opts:
                        def run():
                            self.shell.safe_run_module(modulename, prog_ns)
                    else:
                        if runner is None:
                            runner = self.default_runner
                        if runner is None:
                            runner = self.shell.safe_execfile

                        def run():
                            runner(filename, prog_ns, prog_ns,
                                   exit_ignore=exit_ignore)

                    if 't' in opts:
                        # timed execution
                        try:
                            nruns = int(opts['N'][0])
                            if nruns < 1:
                                error('Number of runs must be >=1')
                                return
                        except (KeyError):
                            nruns = 1
                        self._run_with_timing(run, nruns)
                    else:
                        # regular execution
                        run()

            if 'i' in opts:
                self.shell.user_ns['__name__'] = __name__save
            else:
                # update IPython interactive namespace

                # Some forms of read errors on the file may mean the
                # __name__ key was never set; using pop we don't have to
                # worry about a possible KeyError.
                prog_ns.pop('__name__', None)

                with preserve_keys(self.shell.user_ns, '__file__'):
                    self.shell.user_ns.update(prog_ns)
        finally:
            # It's a bit of a mystery why, but __builtins__ can change from
            # being a module to becoming a dict missing some key data after
            # %run.  As best I can see, this is NOT something IPython is doing
            # at all, and similar problems have been reported before:
            # http://coding.derkeiler.com/Archive/Python/comp.lang.python/2004-10/0188.html
            # Since this seems to be done by the interpreter itself, the best
            # we can do is to at least restore __builtins__ for the user on
            # exit.
            self.shell.user_ns['__builtins__'] = builtin_mod

            # Ensure key global structures are restored
            sys.argv = save_argv
            if restore_main:
                sys.modules['__main__'] = restore_main
                if '__mp_main__' in sys.modules:
                    sys.modules['__mp_main__'] = restore_main
            else:
                # Remove from sys.modules the reference to main_mod we'd
                # added.  Otherwise it will trap references to objects
                # contained therein.
                del sys.modules[main_mod_name]

        return stats
    def _run_with_debugger(
        self, code, code_ns, filename=None, bp_line=None, bp_file=None, local_ns=None
    ):
        """
        Run `code` in debugger with a break point.

        Parameters
        ----------
        code : str
            Code to execute.
        code_ns : dict
            A namespace in which `code` is executed.
        filename : str
            `code` is ran as if it is in `filename`.
        bp_line : int, optional
            Line number of the break point.
        bp_file : str, optional
            Path to the file in which break point is specified.
            `filename` is used if not given.
        local_ns : dict, optional
            A local namespace in which `code` is executed.

        Raises
        ------
        UsageError
            If the break point given by `bp_line` is not valid.
        """
        # Reuse the traceback handler's debugger instance, creating it lazily.
        deb = self.shell.InteractiveTB.pdb
        if not deb:
            self.shell.InteractiveTB.pdb = self.shell.InteractiveTB.debugger_cls()
            deb = self.shell.InteractiveTB.pdb

        # deb.checkline() fails if deb.curframe exists but is None; it can
        # handle it not existing. https://github.com/ipython/ipython/issues/10028
        if hasattr(deb, 'curframe'):
            del deb.curframe

        # reset Breakpoint state, which is moronically kept
        # in a class
        bdb.Breakpoint.next = 1
        bdb.Breakpoint.bplist = {}
        bdb.Breakpoint.bpbynumber = [None]
        deb.clear_all_breaks()
        if bp_line is not None:
            # Set an initial breakpoint to stop execution
            maxtries = 10
            bp_file = bp_file or filename
            checkline = deb.checkline(bp_file, bp_line)
            if not checkline:
                # The requested line isn't breakable; scan forward a few
                # lines for one that is before giving up.
                for bp in range(bp_line + 1, bp_line + maxtries + 1):
                    if deb.checkline(bp_file, bp):
                        break
                else:
                    msg = ("\nI failed to find a valid line to set "
                           "a breakpoint\n"
                           "after trying up to line: %s.\n"
                           "Please set a valid breakpoint manually "
                           "with the -b option." % bp)
                    raise UsageError(msg)
            # if we find a good linenumber, set the breakpoint
            deb.do_break('%s:%s' % (bp_file, bp_line))

        if filename:
            # Mimic Pdb._runscript(...)
            deb._wait_for_mainpyfile = True
            deb.mainpyfile = deb.canonic(filename)

        # Start file run
        print("NOTE: Enter 'c' at the %s prompt to continue execution." % deb.prompt)
        try:
            if filename:
                # save filename so it can be used by methods on the deb object
                deb._exec_filename = filename
            while True:
                try:
                    # Save the current trace function so it can be restored
                    # after the debugger has finished (or been restarted).
                    trace = sys.gettrace()
                    deb.run(code, code_ns, local_ns)
                except Restart:
                    print("Restarting")
                    if filename:
                        deb._wait_for_mainpyfile = True
                        deb.mainpyfile = deb.canonic(filename)
                    continue
                else:
                    break
                finally:
                    sys.settrace(trace)

            # Perform proper cleanup of the session in case if
            # it exited with "continue" and not "quit" command
            if hasattr(deb, "rcLines"):
                # Run this code defensively in case if custom debugger
                # class does not implement rcLines, which although public
                # is an implementation detail of `pdb.Pdb` and not part of
                # the more generic basic debugger framework (`bdb.Bdb`).
                deb.set_quit()
                deb.rcLines.extend(["q"])
                try:
                    deb.run("", code_ns, local_ns)
                except StopIteration:
                    # Stop iteration is raised on quit command
                    pass

        except Exception:
            etype, value, tb = sys.exc_info()
            # Skip three frames in the traceback: the %run one,
            # one inside bdb.py, and the command-line typed by the
            # user (run by exec in pdb itself).
            self.shell.InteractiveTB(etype, value, tb, tb_offset=3)
@staticmethod
def _run_with_timing(run, nruns):
"""
Run function `run` and print timing information.
Parameters
----------
run : callable
Any callable object which takes no argument.
nruns : int
Number of times to execute `run`.
"""
twall0 = time.perf_counter()
if nruns == 1:
t0 = clock2()
run()
t1 = clock2()
t_usr = t1[0] - t0[0]
t_sys = t1[1] - t0[1]
print("\nIPython CPU timings (estimated):")
print(" User : %10.2f s." % t_usr)
print(" System : %10.2f s." % t_sys)
else:
runs = range(nruns)
t0 = clock2()
for nr in runs:
run()
t1 = clock2()
t_usr = t1[0] - t0[0]
t_sys = t1[1] - t0[1]
print("\nIPython CPU timings (estimated):")
print("Total runs performed:", nruns)
print(" Times : %10s %10s" % ('Total', 'Per run'))
print(" User : %10.2f s, %10.2f s." % (t_usr, t_usr / nruns))
print(" System : %10.2f s, %10.2f s." % (t_sys, t_sys / nruns))
twall1 = time.perf_counter()
print("Wall time: %10.2f s." % (twall1 - twall0))
    @skip_doctest
    @no_var_expand
    @line_cell_magic
    @needs_local_scope
    def timeit(self, line='', cell=None, local_ns=None):
        """Time execution of a Python statement or expression

        **Usage, in line mode**::

          %timeit [-n<N> -r<R> [-t|-c] -q -p<P> [-o|-v <V>]] statement

        **or in cell mode**::

          %%timeit [-n<N> -r<R> [-t|-c] -q -p<P> [-o|-v <V>]] setup_code

          code

          code...

        Time execution of a Python statement or expression using the timeit
        module.  This function can be used both as a line and cell magic:

        - In line mode you can time a single-line statement (though multiple
          ones can be chained with using semicolons).

        - In cell mode, the statement in the first line is used as setup code
          (executed but not timed) and the body of the cell is timed.  The cell
          body has access to any variables created in the setup code.

        Options:

        -n<N>
          Execute the given statement N times in a loop. If N is not
          provided, N is determined so as to get sufficient accuracy.

        -r<R>
          Number of repeats R, each consisting of N loops, and take the
          average result.
          Default: 7

        -t
          Use ``time.time`` to measure the time, which is the default on Unix.
          This function measures wall time.

        -c
          Use ``time.clock`` to measure the time, which is the default on
          Windows and measures wall time. On Unix, ``resource.getrusage`` is used
          instead and returns the CPU user time.

        -p<P>
          Use a precision of P digits to display the timing result.
          Default: 3

        -q
          Quiet, do not print result.

        -o
          Return a ``TimeitResult`` that can be stored in a variable to inspect
          the result in more details.

        -v <V>
          Like ``-o``, but save the ``TimeitResult`` directly to variable <V>.

        .. versionchanged:: 7.3
            User variables are no longer expanded,
            the magic line is always left unmodified.

        Examples
        --------
        ::

          In [1]: %timeit pass
          8.26 ns ± 0.12 ns per loop (mean ± std. dev. of 7 runs, 100000000 loops each)

          In [2]: u = None

          In [3]: %timeit u is None
          29.9 ns ± 0.643 ns per loop (mean ± std. dev. of 7 runs, 10000000 loops each)

          In [4]: %timeit -r 4 u == None

          In [5]: import time

          In [6]: %timeit -n1 time.sleep(2)

        The times reported by %timeit will be slightly higher than those
        reported by the timeit.py script when variables are accessed. This is
        due to the fact that %timeit executes the statement in the namespace
        of the shell, compared with timeit.py, which uses a single setup
        statement to import function or create variables. Generally, the bias
        does not matter as long as results from timeit.py are not mixed with
        those from %timeit."""
        # TODO: port to magic_arguments as currently this is duplicated in IPCompleter._extract_code
        opts, stmt = self.parse_options(
            line, "n:r:tcp:qov:", posix=False, strict=False, preserve_non_opts=True
        )
        if stmt == "" and cell is None:
            return

        timefunc = timeit.default_timer
        # number == 0 means "auto-determine the loop count" (see below).
        number = int(getattr(opts, "n", 0))
        default_repeat = 7 if timeit.default_repeat < 7 else timeit.default_repeat
        repeat = int(getattr(opts, "r", default_repeat))
        precision = int(getattr(opts, "p", 3))
        quiet = "q" in opts
        return_result = "o" in opts
        save_result = "v" in opts
        if hasattr(opts, "t"):
            timefunc = time.time
        if hasattr(opts, "c"):
            timefunc = clock

        timer = Timer(timer=timefunc)
        # this code has tight coupling to the inner workings of timeit.Timer,
        # but is there a better way to achieve that the code stmt has access
        # to the shell namespace?
        transform = self.shell.transform_cell

        if cell is None:
            # called as line magic
            ast_setup = self.shell.compile.ast_parse("pass")
            ast_stmt = self.shell.compile.ast_parse(transform(stmt))
        else:
            # Cell mode: first line is (untimed) setup, cell body is timed.
            ast_setup = self.shell.compile.ast_parse(transform(stmt))
            ast_stmt = self.shell.compile.ast_parse(transform(cell))

        ast_setup = self.shell.transform_ast(ast_setup)
        ast_stmt = self.shell.transform_ast(ast_stmt)

        # Check that these compile to valid Python code *outside* the timer func
        # Invalid code may become valid when put inside the function & loop,
        # which messes up error messages.
        # https://github.com/ipython/ipython/issues/10636
        self.shell.compile(ast_setup, "<magic-timeit-setup>", "exec")
        self.shell.compile(ast_stmt, "<magic-timeit-stmt>", "exec")

        # This codestring is taken from timeit.template - we fill it in as an
        # AST, so that we can apply our AST transformations to the user code
        # without affecting the timing code.
        timeit_ast_template = ast.parse('def inner(_it, _timer):\n'
                                        '    setup\n'
                                        '    _t0 = _timer()\n'
                                        '    for _i in _it:\n'
                                        '        stmt\n'
                                        '    _t1 = _timer()\n'
                                        '    return _t1 - _t0\n')

        timeit_ast = TimeitTemplateFiller(ast_setup, ast_stmt).visit(timeit_ast_template)
        timeit_ast = ast.fix_missing_locations(timeit_ast)

        # Track compilation time so it can be reported if too long
        # Minimum time above which compilation time will be reported
        tc_min = 0.1

        t0 = clock()
        code = self.shell.compile(timeit_ast, "<magic-timeit>", "exec")
        tc = clock()-t0

        ns = {}
        glob = self.shell.user_ns
        # handles global vars with same name as local vars. We store them in conflict_globs.
        conflict_globs = {}
        if local_ns and cell is None:
            for var_name, var_val in glob.items():
                if var_name in local_ns:
                    conflict_globs[var_name] = var_val
            glob.update(local_ns)

        exec(code, glob, ns)
        timer.inner = ns["inner"]

        # This is used to check if there is a huge difference between the
        # best and worst timings.
        # Issue: https://github.com/ipython/ipython/issues/6471
        if number == 0:
            # determine number so that 0.2 <= total time < 2.0
            for index in range(0, 10):
                number = 10 ** index
                time_number = timer.timeit(number)
                if time_number >= 0.2:
                    break

        all_runs = timer.repeat(repeat, number)
        best = min(all_runs) / number
        worst = max(all_runs) / number
        timeit_result = TimeitResult(number, repeat, best, worst, all_runs, tc, precision)

        # Restore global vars from conflict_globs
        if conflict_globs:
            glob.update(conflict_globs)

        if not quiet:
            # Check best timing is greater than zero to avoid a
            # ZeroDivisionError.
            # In cases where the slowest timing is lesser than a microsecond
            # we assume that it does not really matter if the fastest
            # timing is 4 times faster than the slowest timing or not.
            if worst > 4 * best and best > 0 and worst > 1e-6:
                print("The slowest run took %0.2f times longer than the "
                      "fastest. This could mean that an intermediate result "
                      "is being cached." % (worst / best))

            print( timeit_result )

            if tc > tc_min:
                print("Compiler time: %.2f s" % tc)
        if save_result:
            self.shell.user_ns[opts.v] = timeit_result
        if return_result:
            return timeit_result
@no_var_expand
@magic_arguments.magic_arguments()
@magic_arguments.argument(
"--no-raise-error",
action="store_true",
dest="no_raise_error",
help="If given, don't re-raise exceptions",
)
@magic_arguments.kwds(
epilog="""
Any remaining arguments will be treated as code to run.
"""
)
@skip_doctest
@needs_local_scope
@line_cell_magic
@output_can_be_silenced
def time(self, line="", cell=None, local_ns=None):
"""Time execution of a Python statement or expression.
The CPU and wall clock times are printed, and the value of the
expression (if any) is returned. Note that under Win32, system time
is always reported as 0, since it can not be measured.
This function can be used both as a line and cell magic:
- In line mode you can time a single-line statement (though multiple
ones can be chained with using semicolons).
- In cell mode, you can time the cell body (a directly
following statement raises an error).
This function provides very basic timing functionality. Use the timeit
magic for more control over the measurement.
.. versionchanged:: 7.3
User variables are no longer expanded,
the magic line is always left unmodified.
.. versionchanged:: 8.3
The time magic now correctly propagates system-exiting exceptions
(such as ``KeyboardInterrupt`` invoked when interrupting execution)
rather than just printing out the exception traceback.
The non-system-exception will still be caught as before.
Examples
--------
::
In [1]: %time 2**128
CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
Wall time: 0.00
Out[1]: 340282366920938463463374607431768211456L
In [2]: n = 1000000
In [3]: %time sum(range(n))
CPU times: user 1.20 s, sys: 0.05 s, total: 1.25 s
Wall time: 1.37
Out[3]: 499999500000L
In [4]: %time print('hello world')
hello world
CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
Wall time: 0.00
.. note::
The time needed by Python to compile the given expression will be
reported if it is more than 0.1s.
In the example below, the actual exponentiation is done by Python
at compilation time, so while the expression can take a noticeable
amount of time to compute, that time is purely due to the
compilation::
In [5]: %time 3**9999;
CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
Wall time: 0.00 s
In [6]: %time 3**999999;
CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
Wall time: 0.00 s
Compiler : 0.78 s
"""
args, extra = magic_arguments.parse_argstring(self.time, line, partial=True)
line = " ".join(extra)
if line and cell:
raise UsageError("Can't use statement directly after '%%time'!")
if cell:
expr = self.shell.transform_cell(cell)
else:
expr = self.shell.transform_cell(line)
# Minimum time above which parse time will be reported
tp_min = 0.1
t0 = clock()
expr_ast = self.shell.compile.ast_parse(expr)
tp = clock() - t0
# Apply AST transformations
expr_ast = self.shell.transform_ast(expr_ast)
# Minimum time above which compilation time will be reported
tc_min = 0.1
expr_val = None
if len(expr_ast.body) == 1 and isinstance(expr_ast.body[0], ast.Expr):
mode = 'eval'
source = '<timed eval>'
expr_ast = ast.Expression(expr_ast.body[0].value)
else:
mode = 'exec'
source = '<timed exec>'
# multi-line %%time case
if len(expr_ast.body) > 1 and isinstance(expr_ast.body[-1], ast.Expr):
expr_val = expr_ast.body[-1]
expr_ast = expr_ast.body[:-1]
expr_ast = Module(expr_ast, [])
expr_val = ast.Expression(expr_val.value)
t0 = clock()
code = self.shell.compile(expr_ast, source, mode)
tc = clock() - t0
# skew measurement as little as possible
glob = self.shell.user_ns
wtime = time.time
# time execution
wall_st = wtime()
# Track whether to propagate exceptions or exit
exit_on_interrupt = False
interrupt_occured = False
captured_exception = None
if mode == "eval":
st = clock2()
try:
out = eval(code, glob, local_ns)
except KeyboardInterrupt as e:
captured_exception = e
interrupt_occured = True
exit_on_interrupt = True
except Exception as e:
captured_exception = e
interrupt_occured = True
if not args.no_raise_error:
exit_on_interrupt = True
end = clock2()
else:
st = clock2()
try:
exec(code, glob, local_ns)
out = None
# multi-line %%time case
if expr_val is not None:
code_2 = self.shell.compile(expr_val, source, 'eval')
out = eval(code_2, glob, local_ns)
except KeyboardInterrupt as e:
captured_exception = e
interrupt_occured = True
exit_on_interrupt = True
except Exception as e:
captured_exception = e
interrupt_occured = True
if not args.no_raise_error:
exit_on_interrupt = True
end = clock2()
wall_end = wtime()
# Compute actual times and report
wall_time = wall_end - wall_st
cpu_user = end[0] - st[0]
cpu_sys = end[1] - st[1]
cpu_tot = cpu_user + cpu_sys
# On windows cpu_sys is always zero, so only total is displayed
if sys.platform != "win32":
print(
f"CPU times: user {_format_time(cpu_user)}, sys: {_format_time(cpu_sys)}, total: {_format_time(cpu_tot)}"
)
else:
print(f"CPU times: total: {_format_time(cpu_tot)}")
print(f"Wall time: {_format_time(wall_time)}")
if tc > tc_min:
print(f"Compiler : {_format_time(tc)}")
if tp > tp_min:
print(f"Parser : {_format_time(tp)}")
if interrupt_occured:
if exit_on_interrupt and captured_exception:
raise captured_exception
return
return out
@skip_doctest
@line_magic
def macro(self, parameter_s=''):
"""Define a macro for future re-execution. It accepts ranges of history,
filenames or string objects.
Usage::
%macro [options] name n1-n2 n3-n4 ... n5 .. n6 ...
Options:
-r
Use 'raw' input. By default, the 'processed' history is used,
so that magics are loaded in their transformed version to valid
Python. If this option is given, the raw input as typed at the
command line is used instead.
-q
Quiet macro definition. By default, a tag line is printed
to indicate the macro has been created, and then the contents of
the macro are printed. If this option is given, then no printout
is produced once the macro is created.
This will define a global variable called `name` which is a string
made of joining the slices and lines you specify (n1,n2,... numbers
above) from your input history into a single string. This variable
acts like an automatic function which re-executes those lines as if
you had typed them. You just type 'name' at the prompt and the code
executes.
The syntax for indicating input ranges is described in %history.
Note: as a 'hidden' feature, you can also use traditional python slice
notation, where N:M means numbers N through M-1.
For example, if your history contains (print using %hist -n )::
44: x=1
45: y=3
46: z=x+y
47: print(x)
48: a=5
49: print('x',x,'y',y)
you can create a macro with lines 44 through 47 (included) and line 49
called my_macro with::
In [55]: %macro my_macro 44-47 49
Now, typing `my_macro` (without quotes) will re-execute all this code
in one pass.
You don't need to give the line-numbers in order, and any given line
number can appear multiple times. You can assemble macros with any
lines from your input history in any order.
The macro is a simple object which holds its value in an attribute,
but IPython's display system checks for macros and executes them as
code instead of printing them when you type their name.
You can view a macro's contents by explicitly printing it with::
print(macro_name)
"""
opts,args = self.parse_options(parameter_s,'rq',mode='list')
if not args: # List existing macros
return sorted(k for k,v in self.shell.user_ns.items() if isinstance(v, Macro))
if len(args) == 1:
raise UsageError(
"%macro insufficient args; usage '%macro name n1-n2 n3-4...")
name, codefrom = args[0], " ".join(args[1:])
# print('rng',ranges) # dbg
try:
lines = self.shell.find_user_code(codefrom, 'r' in opts)
except (ValueError, TypeError) as e:
print(e.args[0])
return
macro = Macro(lines)
self.shell.define_macro(name, macro)
if "q" not in opts:
print(
"Macro `%s` created. To execute, type its name (without quotes)." % name
)
print("=== Macro contents: ===")
print(macro, end=" ")
@magic_arguments.magic_arguments()
@magic_arguments.argument(
"output",
type=str,
default="",
nargs="?",
help="""
The name of the variable in which to store output.
This is a ``utils.io.CapturedIO`` object with stdout/err attributes
for the text of the captured output.
CapturedOutput also has a ``show()`` method for displaying the output,
and ``__call__`` as well, so you can use that to quickly display the
output.
If unspecified, captured output is discarded.
""",
)
@magic_arguments.argument(
"--no-stderr", action="store_true", help="""Don't capture stderr."""
)
@magic_arguments.argument(
"--no-stdout", action="store_true", help="""Don't capture stdout."""
)
@magic_arguments.argument(
"--no-display",
action="store_true",
help="""Don't capture IPython's rich display."""
)
@cell_magic
def capture(self, line, cell):
"""run the cell, capturing stdout, stderr, and IPython's rich display() calls."""
args = magic_arguments.parse_argstring(self.capture, line)
out = not args.no_stdout
err = not args.no_stderr
disp = not args.no_display
with capture_output(out, err, disp) as io:
self.shell.run_cell(cell)
if DisplayHook.semicolon_at_end_of_expression(cell):
if args.output in self.shell.user_ns:
del self.shell.user_ns[args.output]
elif args.output:
self.shell.user_ns[args.output] = io
@skip_doctest
@magic_arguments.magic_arguments()
@magic_arguments.argument("name", type=str, default="default", nargs="?")
@magic_arguments.argument(
"--remove", action="store_true", help="remove the current transformer"
)
@magic_arguments.argument(
"--list", action="store_true", help="list existing transformers name"
)
@magic_arguments.argument(
"--list-all",
action="store_true",
help="list existing transformers name and code template",
)
@line_cell_magic
def code_wrap(self, line, cell=None):
"""
Simple magic to quickly define a code transformer for all IPython's future input.
``__code__`` and ``__ret__`` are special variable that represent the code to run
and the value of the last expression of ``__code__`` respectively.
Examples
--------
.. ipython::
In [1]: %%code_wrap before_after
...: print('before')
...: __code__
...: print('after')
...: __ret__
In [2]: 1
before
after
Out[2]: 1
In [3]: %code_wrap --list
before_after
In [4]: %code_wrap --list-all
before_after :
print('before')
__code__
print('after')
__ret__
In [5]: %code_wrap --remove before_after
"""
args = magic_arguments.parse_argstring(self.code_wrap, line)
if args.list:
for name in self._transformers.keys():
print(name)
return
if args.list_all:
for name, _t in self._transformers.items():
print(name, ":")
print(indent(ast.unparse(_t.template), " "))
print()
return
to_remove = self._transformers.pop(args.name, None)
if to_remove in self.shell.ast_transformers:
self.shell.ast_transformers.remove(to_remove)
if cell is None or args.remove:
return
_trs = ReplaceCodeTransformer(ast.parse(cell))
self._transformers[args.name] = _trs
self.shell.ast_transformers.append(_trs)
def parse_breakpoint(text, current_file):
'''Returns (file, line) for file:line and (current_file, line) for line'''
colon = text.find(':')
if colon == -1:
return current_file, int(text)
else:
return text[:colon], int(text[colon+1:])
def _format_time(timespan, precision=3):
"""Formats the timespan in a human readable form"""
if timespan >= 60.0:
# we have more than a minute, format that in a human readable form
# Idea from http://snipplr.com/view/5713/
parts = [("d", 60 * 60 * 24), ("h", 60 * 60), ("min", 60), ("s", 1)]
time = []
leftover = timespan
for suffix, length in parts:
value = int(leftover / length)
if value > 0:
leftover = leftover % length
time.append("%s%s" % (str(value), suffix))
if leftover < 1:
break
return " ".join(time)
# Unfortunately characters outside of range(128) can cause problems in
# certain terminals.
# See bug: https://bugs.launchpad.net/ipython/+bug/348466
# Try to prevent crashes by being more secure than it needs to
# E.g. eclipse is able to print a µ, but has no sys.stdout.encoding set.
units = ["s", "ms", "us", "ns"] # the safe value
if hasattr(sys.stdout, "encoding") and sys.stdout.encoding:
try:
"μ".encode(sys.stdout.encoding)
units = ["s", "ms", "μs", "ns"]
except:
pass
scaling = [1, 1e3, 1e6, 1e9]
if timespan > 0.0:
order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
else:
order = 3
return "%.*g %s" % (precision, timespan * scaling[order], units[order])
| ExecutionMagics |
python | Lightning-AI__lightning | src/lightning/pytorch/trainer/connectors/checkpoint_connector.py | {
"start": 1791,
"end": 25298
} | class ____:
def __init__(self, trainer: "pl.Trainer") -> None:
self.trainer = trainer
self._ckpt_path: Optional[_PATH] = None
# flag to know if the user is changing the checkpoint path statefully. See `trainer.ckpt_path.setter`
self._user_managed: bool = False
self._loaded_checkpoint: dict[str, Any] = {}
@property
def _hpc_resume_path(self) -> Optional[str]:
dir_path_hpc = str(self.trainer.default_root_dir)
fs, path = url_to_fs(dir_path_hpc)
if not _is_dir(fs, path):
return None
max_version = self.__max_ckpt_version_in_folder(dir_path_hpc, "hpc_ckpt_")
if max_version is not None:
if isinstance(fs, LocalFileSystem):
return os.path.join(dir_path_hpc, f"hpc_ckpt_{max_version}.ckpt")
return dir_path_hpc + fs.sep + f"hpc_ckpt_{max_version}.ckpt"
return None
def resume_start(self, checkpoint_path: Optional[_PATH] = None, weights_only: Optional[bool] = None) -> None:
"""Attempts to pre-load the checkpoint file to memory, with the source path determined in this priority:
1. from HPC weights if `checkpoint_path` is ``None`` and on SLURM or passed keyword `"hpc"`.
2. from fault-tolerant auto-saved checkpoint if found
3. from `checkpoint_path` file if provided
4. don't restore
"""
self._ckpt_path = checkpoint_path
if not checkpoint_path:
log.debug("`checkpoint_path` not specified. Skipping checkpoint loading.")
return
rank_zero_info(f"Restoring states from the checkpoint path at {checkpoint_path}")
with pl_legacy_patch():
loaded_checkpoint = self.trainer.strategy.load_checkpoint(checkpoint_path, weights_only=weights_only)
self._loaded_checkpoint = _pl_migrate_checkpoint(loaded_checkpoint, checkpoint_path)
def _select_ckpt_path(
self, state_fn: TrainerFn, ckpt_path: Optional[_PATH], model_provided: bool, model_connected: bool
) -> Optional[_PATH]:
"""Called by the ``Trainer`` to select the checkpoint path source."""
if self._user_managed:
if ckpt_path:
rank_zero_warn(
f"`trainer.ckpt_path = {self._ckpt_path!r}` was called but then you"
f" passed `trainer.fit(ckpt_path={ckpt_path!r})`. The latter will be loaded."
)
# reset the previous path
self._ckpt_path = None
self._user_managed = False
ckpt_path = self._parse_ckpt_path(
state_fn,
ckpt_path,
model_provided=model_provided,
model_connected=model_connected,
)
else:
ckpt_path = self._ckpt_path
else:
ckpt_path = self._parse_ckpt_path(
state_fn,
ckpt_path,
model_provided=model_provided,
model_connected=model_connected,
)
return ckpt_path
def _parse_ckpt_path(
self, state_fn: TrainerFn, ckpt_path: Optional[_PATH], model_provided: bool, model_connected: bool
) -> Optional[_PATH]:
"""Converts the ``ckpt_path`` special values into an actual filepath, depending on the trainer
configuration."""
if ckpt_path is None and SLURMEnvironment.detect() and self._hpc_resume_path is not None:
ckpt_path = "hpc"
from lightning.pytorch.callbacks.on_exception_checkpoint import OnExceptionCheckpoint
ft_checkpoints = [cb for cb in self.trainer.callbacks if isinstance(cb, OnExceptionCheckpoint)]
fn = state_fn.value
if ckpt_path is None and ft_checkpoints and self.trainer.state.fn == TrainerFn.FITTING:
ckpt_path = "last"
rank_zero_warn(
f"`.{fn}(ckpt_path=None)` was called without a model."
" The last model of the previous `fit` call will be used."
f" You can pass `{fn}(ckpt_path='best')` to use the best model or"
f" `{fn}(ckpt_path='last')` to use the last model."
" If you pass a value, this warning will be silenced."
)
if model_provided and ckpt_path is None:
# use passed model to function without loading weights
return None
if model_connected and ckpt_path is None:
ckpt_path = "best"
ft_tip = (
" There is also an on-exception checkpoint available, however it is used by default only when fitting."
if ft_checkpoints
else ""
)
rank_zero_warn(
f"`.{fn}(ckpt_path=None)` was called without a model."
" The best model of the previous `fit` call will be used."
+ ft_tip
+ f" You can pass `.{fn}(ckpt_path='best')` to use the best model or"
f" `.{fn}(ckpt_path='last')` to use the last model."
" If you pass a value, this warning will be silenced."
)
if ckpt_path == "best":
if len(self.trainer.checkpoint_callbacks) > 1:
rank_zero_warn(
f'`.{fn}(ckpt_path="best")` is called with Trainer configured with multiple `ModelCheckpoint`'
" callbacks. It will use the best checkpoint path from first checkpoint callback."
)
if not self.trainer.checkpoint_callback:
raise ValueError(f'`.{fn}(ckpt_path="best")` is set but `ModelCheckpoint` is not configured.')
has_best_model_path = self.trainer.checkpoint_callback.best_model_path
if hasattr(self.trainer.checkpoint_callback, "best_model_path") and not has_best_model_path:
if self.trainer.fast_dev_run:
raise ValueError(
f'You cannot execute `.{fn}(ckpt_path="best")` with `fast_dev_run=True`.'
f" Please pass an exact checkpoint path to `.{fn}(ckpt_path=...)`"
)
raise ValueError(
f'`.{fn}(ckpt_path="best")` is set but `ModelCheckpoint` is not configured to save the best model.'
)
# load best weights
ckpt_path = getattr(self.trainer.checkpoint_callback, "best_model_path", None)
elif ckpt_path == "last":
candidates = {getattr(ft, "ckpt_path", None) for ft in ft_checkpoints}
for callback in self.trainer.checkpoint_callbacks:
if isinstance(callback, ModelCheckpoint):
candidates |= callback._find_last_checkpoints(self.trainer)
candidates_fs = {path: get_filesystem(path) for path in candidates if path}
candidates_ts = {path: fs.modified(path) for path, fs in candidates_fs.items() if fs.exists(path)}
if not candidates_ts:
# not an error so it can be set and forget before the first `fit` run
rank_zero_warn(
f'.{fn}(ckpt_path="last") is set, but there is no last checkpoint available.'
" No checkpoint will be loaded. HINT: Set `ModelCheckpoint(..., save_last=True)`."
)
return None
ckpt_path = max(candidates_ts, key=candidates_ts.get) # type: ignore[arg-type]
elif ckpt_path == "hpc":
if not self._hpc_resume_path:
raise ValueError(
f'`.{fn}(ckpt_path="hpc")` is set but no HPC checkpoint was found.'
f" Please pass an exact checkpoint path to `.{fn}(ckpt_path=...)`"
)
ckpt_path = self._hpc_resume_path
elif _is_registry(ckpt_path) and module_available("litmodels"):
ckpt_path = find_model_local_ckpt_path(
ckpt_path,
default_model_registry=self.trainer._model_registry,
default_root_dir=self.trainer.default_root_dir,
)
if not ckpt_path:
raise ValueError(
f"`.{fn}()` found no path for the best weights: {ckpt_path!r}. Please"
f" specify a path for a checkpoint `.{fn}(ckpt_path=PATH)`"
)
return ckpt_path
def resume_end(self) -> None:
"""Signal the connector that all states have resumed and memory for the checkpoint object can be released."""
assert self.trainer.state.fn is not None
if self._ckpt_path:
message = "Restored all states" if self.trainer.state.fn == TrainerFn.FITTING else "Loaded model weights"
rank_zero_info(f"{message} from the checkpoint at {self._ckpt_path}")
# free memory
self._loaded_checkpoint = {}
torch.cuda.empty_cache()
# wait for all to catch up
self.trainer.strategy.barrier("_CheckpointConnector.resume_end")
def restore(self, checkpoint_path: Optional[_PATH] = None, weights_only: Optional[bool] = None) -> None:
"""Attempt to restore everything at once from a 'PyTorch-Lightning checkpoint' file through file-read and
state-restore, in this priority:
1. from HPC weights if found
2. from `checkpoint_path` file if provided
3. don't restore
All restored states are listed in return value description of `dump_checkpoint`.
Args:
checkpoint_path: Path to a PyTorch Lightning checkpoint file.
"""
self.resume_start(checkpoint_path, weights_only=weights_only)
# restore module states
self.restore_datamodule()
self.restore_model()
# restore callback states
self.restore_callbacks()
# restore training state
self.restore_training_state()
self.resume_end()
def restore_datamodule(self) -> None:
"""Calls hooks on the datamodule to give it a chance to restore its state from the checkpoint."""
if not self._loaded_checkpoint:
return
trainer = self.trainer
datamodule = trainer.datamodule
if datamodule is not None and datamodule.__class__.__qualname__ in self._loaded_checkpoint:
call._call_lightning_datamodule_hook(
trainer, "load_state_dict", self._loaded_checkpoint[datamodule.__class__.__qualname__]
)
def restore_model(self) -> None:
"""Restores a model's weights from a PyTorch Lightning checkpoint.
Hooks are called first to give the LightningModule a chance to modify the contents, then finally the model gets
updated with the loaded weights.
"""
if not self._loaded_checkpoint:
return
# hook: give user access to checkpoint if needed.
call._call_lightning_module_hook(self.trainer, "on_load_checkpoint", self._loaded_checkpoint)
# restore model state_dict
self.trainer.strategy.load_model_state_dict(
self._loaded_checkpoint,
strict=self.trainer.lightning_module.strict_loading,
)
def restore_training_state(self) -> None:
"""Restore the trainer state from the pre-loaded checkpoint.
This includes the precision settings, loop progress, optimizer states and learning rate scheduler states.
"""
if not self._loaded_checkpoint:
return
# restore precision plugin (scaler etc.)
self.restore_precision_plugin_state()
# restore loops and their progress
self.restore_loops()
assert self.trainer.state.fn is not None
if self.trainer.state.fn == TrainerFn.FITTING:
# restore optimizers and schedulers state
self.restore_optimizers_and_schedulers()
def restore_precision_plugin_state(self) -> None:
"""Restore the precision plugin state from the pre-loaded checkpoint."""
prec_plugin = self.trainer.precision_plugin
prec_plugin.on_load_checkpoint(self._loaded_checkpoint)
if prec_plugin.__class__.__qualname__ in self._loaded_checkpoint:
prec_plugin.load_state_dict(self._loaded_checkpoint[prec_plugin.__class__.__qualname__])
# old checkpoints compatibility
if "native_amp_scaling_state" in self._loaded_checkpoint and isinstance(prec_plugin, MixedPrecision):
prec_plugin.load_state_dict(self._loaded_checkpoint["native_amp_scaling_state"])
def restore_callbacks(self) -> None:
"""Restores all callbacks from the pre-loaded checkpoint."""
if not self._loaded_checkpoint:
return
trainer = self.trainer
call._call_callbacks_on_load_checkpoint(trainer, self._loaded_checkpoint)
call._call_callbacks_load_state_dict(trainer, self._loaded_checkpoint)
def restore_loops(self) -> None:
"""Restores the loop progress from the pre-loaded checkpoint.
Calls hooks on the loops to give it a chance to restore its state from the checkpoint.
"""
if not self._loaded_checkpoint:
return
fit_loop = self.trainer.fit_loop
assert self.trainer.state.fn is not None
state_dict = self._loaded_checkpoint.get("loops")
if state_dict is not None:
if self.trainer.state.fn == TrainerFn.FITTING:
fit_loop.load_state_dict(state_dict["fit_loop"])
elif self.trainer.state.fn == TrainerFn.VALIDATING:
self.trainer.validate_loop.load_state_dict(state_dict["validate_loop"])
elif self.trainer.state.fn == TrainerFn.TESTING:
self.trainer.test_loop.load_state_dict(state_dict["test_loop"])
elif self.trainer.state.fn == TrainerFn.PREDICTING:
self.trainer.predict_loop.load_state_dict(state_dict["predict_loop"])
if self.trainer.state.fn != TrainerFn.FITTING:
return
# crash if max_epochs is lower then the current epoch from the checkpoint
if (
self.trainer.max_epochs != -1
and self.trainer.max_epochs is not None
and self.trainer.current_epoch > self.trainer.max_epochs
):
raise MisconfigurationException(
f"You restored a checkpoint with current_epoch={self.trainer.current_epoch},"
f" but you have set Trainer(max_epochs={self.trainer.max_epochs})."
)
def restore_optimizers_and_schedulers(self) -> None:
"""Restores the optimizers and learning rate scheduler states from the pre-loaded checkpoint."""
if not self._loaded_checkpoint:
return
if self.trainer.strategy.lightning_restore_optimizer:
# validation
if "optimizer_states" not in self._loaded_checkpoint:
raise KeyError(
"Trying to restore optimizer state but checkpoint contains only the model."
" This is probably due to `ModelCheckpoint.save_weights_only` being set to `True`."
)
self.restore_optimizers()
if "lr_schedulers" not in self._loaded_checkpoint:
raise KeyError(
"Trying to restore learning rate scheduler state but checkpoint contains only the model."
" This is probably due to `ModelCheckpoint.save_weights_only` being set to `True`."
)
self.restore_lr_schedulers()
def restore_optimizers(self) -> None:
"""Restores the optimizer states from the pre-loaded checkpoint."""
if not self._loaded_checkpoint:
return
# restore the optimizers
self.trainer.strategy.load_optimizer_state_dict(self._loaded_checkpoint)
def restore_lr_schedulers(self) -> None:
"""Restores the learning rate scheduler states from the pre-loaded checkpoint."""
if not self._loaded_checkpoint:
return
# restore the lr schedulers
lr_schedulers = self._loaded_checkpoint["lr_schedulers"]
for config, lrs_state in zip(self.trainer.lr_scheduler_configs, lr_schedulers):
config.scheduler.load_state_dict(lrs_state)
def _restore_modules_and_callbacks(
self, checkpoint_path: Optional[_PATH] = None, weights_only: Optional[bool] = None
) -> None:
# restore modules after setup
self.resume_start(checkpoint_path, weights_only=weights_only)
self.restore_model()
self.restore_datamodule()
self.restore_callbacks()
def dump_checkpoint(self, weights_only: Optional[bool] = None) -> dict:
"""Creating a model checkpoint dictionary object from various component states.
Args:
weights_only: If True, only saves model and loops state_dict objects. If False,
additionally saves callbacks, optimizers, schedulers, and precision plugin states.
Return:
structured dictionary: {
'epoch': training epoch
'global_step': training global step
'pytorch-lightning_version': The version of PyTorch Lightning that produced this checkpoint
'callbacks': "callback specific state"[] # if not weights_only
'optimizer_states': "PT optim's state_dict"[] # if not weights_only
'lr_schedulers': "PT sched's state_dict"[] # if not weights_only
'state_dict': Model's state_dict (e.g. network weights)
precision_plugin.__class__.__qualname__: precision plugin state_dict # if not weights_only
CHECKPOINT_HYPER_PARAMS_NAME:
CHECKPOINT_HYPER_PARAMS_KEY:
CHECKPOINT_HYPER_PARAMS_TYPE:
something_cool_i_want_to_save: anything you define through model.on_save_checkpoint
LightningDataModule.__class__.__qualname__: pl DataModule's state
}
"""
trainer = self.trainer
model = trainer.lightning_module
datamodule = trainer.datamodule
checkpoint = {
# the epoch and global step are saved for compatibility but they are not relevant for restoration
"epoch": trainer.current_epoch,
"global_step": trainer.global_step,
"pytorch-lightning_version": pl.__version__,
"state_dict": self._get_lightning_module_state_dict(),
"loops": self._get_loops_state_dict(),
}
if weights_only is None:
weights_only = False
log.info("`weights_only` was not set, defaulting to `False`.")
if not weights_only:
# dump callbacks
checkpoint["callbacks"] = call._call_callbacks_state_dict(trainer)
optimizer_states = []
for i, optimizer in enumerate(trainer.optimizers):
# Rely on accelerator to dump optimizer state
optimizer_state = trainer.strategy.optimizer_state(optimizer)
optimizer_states.append(optimizer_state)
checkpoint["optimizer_states"] = optimizer_states
# dump lr schedulers
lr_schedulers = []
for config in trainer.lr_scheduler_configs:
lr_schedulers.append(config.scheduler.state_dict())
checkpoint["lr_schedulers"] = lr_schedulers
# precision plugin
prec_plugin = trainer.precision_plugin
prec_plugin_state_dict = prec_plugin.state_dict()
if prec_plugin_state_dict:
checkpoint[prec_plugin.__class__.__qualname__] = prec_plugin_state_dict
prec_plugin.on_save_checkpoint(checkpoint)
if _OMEGACONF_AVAILABLE:
from omegaconf import Container
# dump hyper-parameters
for obj in (model, datamodule):
if obj and obj.hparams:
if hasattr(obj, "_hparams_name"):
checkpoint[obj.CHECKPOINT_HYPER_PARAMS_NAME] = obj._hparams_name
# dump arguments
if _OMEGACONF_AVAILABLE and isinstance(obj.hparams, Container):
checkpoint[obj.CHECKPOINT_HYPER_PARAMS_KEY] = obj.hparams
checkpoint[obj.CHECKPOINT_HYPER_PARAMS_TYPE] = type(obj.hparams)
else:
checkpoint[obj.CHECKPOINT_HYPER_PARAMS_KEY] = dict(obj.hparams)
# dump stateful datamodule
if datamodule is not None:
datamodule_state_dict = call._call_lightning_datamodule_hook(trainer, "state_dict")
if datamodule_state_dict:
checkpoint[datamodule.__class__.__qualname__] = datamodule_state_dict
# on_save_checkpoint hooks
if not weights_only:
# if state is returned from callback's on_save_checkpoint
# it overrides the returned state from callback's state_dict
# support for returning state in on_save_checkpoint
# will be removed in v1.8
call._call_callbacks_on_save_checkpoint(trainer, checkpoint)
call._call_lightning_module_hook(trainer, "on_save_checkpoint", checkpoint)
return checkpoint
def _get_lightning_module_state_dict(self) -> dict[str, Tensor]:
return self.trainer.strategy.lightning_module_state_dict()
def _get_loops_state_dict(self) -> dict[str, Any]:
return {
"fit_loop": self.trainer.fit_loop.state_dict(),
"validate_loop": self.trainer.validate_loop.state_dict(),
"test_loop": self.trainer.test_loop.state_dict(),
"predict_loop": self.trainer.predict_loop.state_dict(),
}
@staticmethod
def __max_ckpt_version_in_folder(dir_path: _PATH, name_key: str = "ckpt_") -> Optional[int]:
"""List up files in `dir_path` with `name_key`, then yield maximum suffix number.
Args:
dir_path: path of directory which may contain files whose name include `name_key`
name_key: file name prefix
Returns:
None if no-corresponding-file else maximum suffix number
"""
# check directory existence
fs, uri = url_to_fs(str(dir_path))
if not fs.exists(dir_path):
return None
# check corresponding file existence
files = [os.path.basename(f["name"]) for f in fs.listdir(uri)]
files = [x for x in files if name_key in x]
if len(files) == 0:
return None
# extract suffix number
ckpt_vs = []
for name in files:
name = name.split(name_key)[-1]
name = re.sub("[^0-9]", "", name)
ckpt_vs.append(int(name))
return max(ckpt_vs)
@staticmethod
def __get_max_ckpt_path_from_folder(folder_path: _PATH) -> str:
"""Get path of maximum-epoch checkpoint in the folder."""
max_suffix = _CheckpointConnector.__max_ckpt_version_in_folder(folder_path)
ckpt_number = max_suffix if max_suffix is not None else 0
return f"{folder_path}/hpc_ckpt_{ckpt_number}.ckpt"
@staticmethod
def hpc_save_path(folderpath: _PATH) -> str:
max_suffix = _CheckpointConnector.__max_ckpt_version_in_folder(folderpath)
ckpt_number = (max_suffix if max_suffix is not None else 0) + 1
return os.path.join(folderpath, f"hpc_ckpt_{ckpt_number}.ckpt")
| _CheckpointConnector |
python | huggingface__transformers | tests/models/git/test_modeling_git.py | {
"start": 13714,
"end": 17409
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (GitModel, GitForCausalLM) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": GitModel,
"image-to-text": GitForCausalLM,
"text-generation": GitForCausalLM,
"image-text-to-text": GitForCausalLM,
"any-to-any": GitForCausalLM,
}
if is_torch_available()
else {}
)
# special case for GitForCausalLM model
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class in get_values(MODEL_FOR_CAUSAL_LM_MAPPING):
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length),
dtype=torch.long,
device=torch_device,
)
return inputs_dict
def setUp(self):
self.model_tester = GitModelTester(self)
self.config_tester = ConfigTester(self, config_class=GitConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_causal_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
def test_batched_generate_captioning(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester._test_batched_generate_captioning(*config_and_inputs)
def _check_attentions_for_generate(
self, batch_size, attentions, prompt_length, output_length, config, decoder_past_key_values
):
# GIT attention shape depends on image inputs, overwrite
image_length = int((config.vision_config.image_size / config.vision_config.patch_size) ** 2 + 1)
prompt_length += image_length
output_length += image_length
super()._check_attentions_for_generate(
batch_size, attentions, prompt_length, output_length, config, decoder_past_key_values
)
def _check_hidden_states_for_generate(
self, batch_size, hidden_states, prompt_length, output_length, config, use_cache=False
):
# GIT attention shape depends on image inputs, overwrite
image_length = int((config.vision_config.image_size / config.vision_config.patch_size) ** 2 + 1)
prompt_length += image_length
output_length += image_length
super()._check_hidden_states_for_generate(
batch_size, hidden_states, prompt_length, output_length, config, use_cache=use_cache
)
@slow
def test_model_from_pretrained(self):
model_name = "microsoft/git-base"
model = GitModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@unittest.skip(reason="GIT has pixel values as additional input")
def test_beam_search_generate_dict_outputs_use_cache(self):
pass
@unittest.skip(reason="GIT has pixel values as additional input")
def test_greedy_generate_dict_outputs_use_cache(self):
pass
@unittest.skip(reason="GIT input and output sequence lengths are not equal due to pixel values additional input")
def test_forward_with_logits_to_keep(self):
pass
@require_torch
@require_vision
@slow
| GitModelTest |
python | cython__cython | Cython/Plex/Errors.py | {
"start": 210,
"end": 372
} | class ____(PlexError):
def __init__(self, token_number, message):
PlexError.__init__(self, "Token number %d: %s" % (token_number, message))
| InvalidToken |
python | getsentry__sentry | src/sentry/utils/sdk.py | {
"start": 11478,
"end": 30605
} | class ____(NamedTuple):
sentry4sentry: str | None
sentry_saas: str | None
def _get_sdk_options() -> tuple[SdkConfig, Dsns]:
sdk_options = settings.SENTRY_SDK_CONFIG.copy()
sdk_options["send_client_reports"] = True
sdk_options["add_full_stack"] = True
sdk_options["enable_http_request_source"] = True
sdk_options["traces_sampler"] = traces_sampler
sdk_options["before_send_transaction"] = before_send_transaction
sdk_options["before_send"] = before_send
sdk_options["release"] = (
f"backend@{sdk_options['release']}" if "release" in sdk_options else None
)
sdk_options.setdefault("_experiments", {}).update(
transport_http2=options.get("sdk_http2_experiment.enabled"),
before_send_log=before_send_log,
enable_logs=True,
enable_metrics=True,
)
# Modify SENTRY_SDK_CONFIG in your deployment scripts to specify your desired DSN
dsns = Dsns(
sentry4sentry=sdk_options.pop("dsn", None),
sentry_saas=sdk_options.pop("relay_dsn", None),
)
return sdk_options, dsns
def configure_sdk():
"""
Setup and initialize the Sentry SDK.
"""
sdk_options, dsns = _get_sdk_options()
if settings.SPOTLIGHT:
sdk_options["spotlight"] = (
settings.SPOTLIGHT_ENV_VAR if settings.SPOTLIGHT_ENV_VAR.startswith("http") else True
)
internal_project_key = get_project_key()
if dsns.sentry4sentry:
transport = make_transport(get_options(dsn=dsns.sentry4sentry, **sdk_options))
sentry4sentry_transport = patch_transport_for_instrumentation(transport, "upstream")
else:
sentry4sentry_transport = None
if dsns.sentry_saas:
transport = make_transport(get_options(dsn=dsns.sentry_saas, **sdk_options))
sentry_saas_transport = patch_transport_for_instrumentation(transport, "relay")
elif settings.IS_DEV and not settings.SENTRY_USE_RELAY:
sentry_saas_transport = None
elif internal_project_key and internal_project_key.dsn_private:
transport = make_transport(get_options(dsn=internal_project_key.dsn_private, **sdk_options))
sentry_saas_transport = patch_transport_for_instrumentation(transport, "relay")
else:
sentry_saas_transport = None
if settings.SENTRY_CONTINUOUS_PROFILING_ENABLED:
sdk_options["profile_session_sample_rate"] = float(
settings.SENTRY_PROFILES_SAMPLE_RATE or 0
)
sdk_options["profile_lifecycle"] = settings.SENTRY_PROFILE_LIFECYCLE
elif settings.SENTRY_PROFILING_ENABLED:
sdk_options["profiles_sampler"] = profiles_sampler
sdk_options["profiler_mode"] = settings.SENTRY_PROFILER_MODE
class MultiplexingTransport(sentry_sdk.transport.Transport):
"""
Sends all envelopes and events to two Sentry instances:
- Sentry SaaS (aka Sentry.io) and
- Sentry4Sentry (aka S4S)
"""
def capture_envelope(self, envelope):
# Temporarily capture envelope counts to compare to ingested
# transactions.
metrics.incr("internal.captured.events.envelopes")
transaction = envelope.get_transaction_event()
if transaction:
metrics.incr("internal.captured.events.transactions")
# Assume only transactions get sent via envelopes
if options.get("transaction-events.force-disable-internal-project"):
return
self._capture_anything("capture_envelope", envelope)
def capture_event(self, event):
if event.get("type") == "transaction" and options.get(
"transaction-events.force-disable-internal-project"
):
return
self._capture_anything("capture_event", event)
def _capture_anything(self, method_name, *args, **kwargs):
# Sentry4Sentry (upstream) should get the event first because
# it is most isolated from the sentry installation.
if sentry4sentry_transport:
metrics.incr("internal.captured.events.upstream")
# TODO(mattrobenolt): Bring this back safely.
# from sentry import options
# install_id = options.get('sentry:install-id')
# if install_id:
# event.setdefault('tags', {})['install-id'] = install_id
s4s_args = args
# We want to control whether we want to send metrics at the s4s upstream.
if (
not settings.SENTRY_SDK_UPSTREAM_METRICS_ENABLED
and method_name == "capture_envelope"
):
args_list = list(args)
envelope = args_list[0]
# We filter out all the statsd envelope items, which contain custom metrics sent by the SDK.
# unless we allow them via a separate sample rate.
safe_items = [
x
for x in envelope.items
if x.data_category != "statsd"
or in_random_rollout("store.allow-s4s-ddm-sample-rate")
]
if len(safe_items) != len(envelope.items):
relay_envelope = copy.copy(envelope)
relay_envelope.items = safe_items
s4s_args = (relay_envelope, *args_list[1:])
getattr(sentry4sentry_transport, method_name)(*s4s_args, **kwargs)
if sentry_saas_transport and options.get("store.use-relay-dsn-sample-rate") == 1:
# If this is an envelope ensure envelope and its items are distinct references
if method_name == "capture_envelope":
args_list = list(args)
envelope = args_list[0]
relay_envelope = copy.copy(envelope)
relay_envelope.items = envelope.items.copy()
args = (relay_envelope, *args_list[1:])
if sentry_saas_transport:
if is_current_event_safe():
metrics.incr("internal.captured.events.relay")
getattr(sentry_saas_transport, method_name)(*args, **kwargs)
else:
metrics.incr(
"internal.uncaptured.events.relay",
skip_internal=False,
tags={"reason": "unsafe"},
)
def record_lost_event(self, *args, **kwargs):
# pass through client report recording to sentry_saas_transport
# not entirely accurate for some cases like rate limiting but does the job
if sentry_saas_transport:
record = getattr(sentry_saas_transport, "record_lost_event", None)
if record:
record(*args, **kwargs)
def is_healthy(self):
if sentry4sentry_transport:
if not sentry4sentry_transport.is_healthy():
return False
if sentry_saas_transport:
if not sentry_saas_transport.is_healthy():
return False
return True
def flush(
self,
timeout,
callback=None,
):
# flush transports in case we received a kill signal
if sentry4sentry_transport:
getattr(sentry4sentry_transport, "flush")(timeout, callback)
if sentry_saas_transport:
getattr(sentry_saas_transport, "flush")(timeout, callback)
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.threading import ThreadingIntegration
sentry_sdk.init(
# set back the sentry4sentry_dsn popped above since we need a default dsn on the client
# for dynamic sampling context public_key population
dsn=dsns.sentry4sentry,
transport=MultiplexingTransport(),
integrations=[
DjangoAtomicIntegration(),
DjangoIntegration(signals_spans=False, cache_spans=True, middleware_spans=False),
# This makes it so all levels of logging are recorded as breadcrumbs,
# but none are captured as events (that's handled by the `internal`
# logger defined in `server.py`, which ignores the levels set
# in the integration and goes straight to the underlying handler class).
LoggingIntegration(event_level=None, sentry_logs_level=logging.INFO),
RustInfoIntegration(),
RedisIntegration(),
ThreadingIntegration(),
],
**sdk_options,
)
def check_tag_for_scope_bleed(
tag_key: str, expected_value: str | int, add_to_scope: bool = True
) -> None:
"""
Detect if the given tag has already been set to a value different than what we expect. If we
find a mismatch, log a warning and, if `add_to_scope` is `True`, add scope bleed tags to the
scope. (An example of when we don't want to add scope bleed tag is if we're only logging a
warning rather than capturing an event.)
"""
# force the string version to prevent false positives
expected_value = str(expected_value)
scope = sentry_sdk.get_isolation_scope()
current_value = scope._tags.get(tag_key)
if not current_value:
return
# ensure we're comparing apples to apples
current_value = str(current_value)
# There are times where we can only narrow down the current org to a list, for example if
# we've derived it from an integration, since integrations can be shared across multiple orgs.
if tag_key == "organization.slug" and current_value == "[multiple orgs]":
# Currently, we don't have access in this function to the underlying slug list
# corresponding to an incoming "[multiple orgs]" tag, so we can't check it against the
# current list. Regardless of whether the lists would match, it's currently not flagged
# as scope bleed. (Fortunately, that version of scope bleed should be a pretty rare case,
# since only ~3% of integrations belong to multiple orgs, making the chance of it
# happening twice around 0.1%.) So for now, just skip that case.
if expected_value != "[multiple orgs]":
# If we've now figured out which of that list is correct, don't count it as a mismatch.
# But if it currently is a list and `expected_value` is something *not* in that list,
# we're almost certainly dealing with scope bleed, so we should continue with our check.
current_org_list = scope._contexts.get("organization", {}).get("multiple possible", [])
if current_org_list and expected_value in current_org_list:
return
if current_value != expected_value:
extra = {
f"previous_{tag_key}_tag": current_value,
f"new_{tag_key}_tag": expected_value,
}
if add_to_scope:
scope.set_tag("possible_mistag", True)
scope.set_tag(f"scope_bleed.{tag_key}", True)
merge_context_into_scope("scope_bleed", extra, scope)
logger.warning("Tag already set and different (%s).", tag_key, extra=extra)
def get_transaction_name_from_request(request: Request) -> str:
"""
Given an incoming request, derive a parameterized transaction name, if possible. Based on the
implementation in `_set_transaction_name_and_source` in the SDK, which is what it uses to label
request transactions. See https://github.com/getsentry/sentry-python/blob/6c68cf4742e6f65da431210085ee095ba6535cee/sentry_sdk/integrations/django/__init__.py#L333.
If parameterization isn't possible, use the request's path.
"""
transaction_name = request.path_info
try:
# Note: In spite of the name, the legacy resolver is still what's used in the python SDK
resolved_transaction_name = LEGACY_RESOLVER.resolve(
request.path_info, urlconf=getattr(request, "urlconf", None)
)
except Exception:
pass
else:
if resolved_transaction_name is not None:
transaction_name = resolved_transaction_name
return transaction_name
def check_current_scope_transaction(
request: Request,
) -> dict[str, str] | None:
"""
Check whether the name of the transaction on the current scope matches what we'd expect, given
the request being handled.
If the transaction values match, return None. If they don't, return a dictionary including both
values.
Note: Ignores scope `transaction` values with `source = "custom"`, indicating a value which has
been set maunually.
"""
scope = sentry_sdk.get_current_scope()
transaction_from_request = get_transaction_name_from_request(request)
if (
scope._transaction is not None
and scope._transaction != transaction_from_request
and scope._transaction_info.get("source") != "custom"
):
return {
"scope_transaction": scope._transaction,
"request_transaction": transaction_from_request,
}
else:
return None
def capture_exception_with_scope_check(
error, scope: Scope | None = None, request: Request | None = None, **scope_args
):
"""
A wrapper around `sentry_sdk.capture_exception` which checks scope `transaction` against the
given Request object, to help debug scope bleed problems.
"""
# The SDK's version of `capture_exception` accepts either a `Scope` object or scope kwargs.
# Regardless of which one the caller passed, convert the data into a `Scope` object
extra_scope = scope or Scope()
extra_scope.update_from_kwargs(**scope_args)
# We've got a weird scope bleed problem, where, among other things, errors are getting tagged
# with the wrong transaction value, so record any possible mismatch.
transaction_mismatch = check_current_scope_transaction(request) if request else None
if transaction_mismatch:
# TODO: We probably should add this data to the scope in `check_current_scope_transaction`
# instead, but the whole point is that right now it's unclear how trustworthy ambient scope is
extra_scope.set_tag("scope_bleed.transaction", True)
merge_context_into_scope("scope_bleed", transaction_mismatch, extra_scope)
return sentry_sdk.capture_exception(error, scope=extra_scope)
def bind_organization_context(organization: Organization | RpcOrganization) -> None:
# Callable to bind additional context for the Sentry SDK
helper = settings.SENTRY_ORGANIZATION_CONTEXT_HELPER
scope = sentry_sdk.get_isolation_scope()
# XXX(dcramer): this is duplicated in organizationContext.jsx on the frontend
with sentry_sdk.start_span(op="other", name="bind_organization_context"):
# This can be used to find errors that may have been mistagged
check_tag_for_scope_bleed("organization.slug", organization.slug)
scope.set_tag("organization", organization.id)
scope.set_tag("organization.slug", organization.slug)
scope.set_context("organization", {"id": organization.id, "slug": organization.slug})
if helper:
try:
helper(scope=scope, organization=organization)
except Exception:
sdk_logger.exception(
"internal-error.organization-context",
extra={"organization_id": organization.id},
)
_AMBIGUOUS_ORG_CUTOFF = 50
def bind_ambiguous_org_context(
orgs: Sequence[Organization] | Sequence[RpcOrganization] | list[str], source: str | None = None
) -> None:
"""
Add org context information to the scope in the case where the current org might be one of a
number of known orgs (for example, if we've attempted to derive the current org from an
Integration instance, which can be shared by multiple orgs).
"""
MULTIPLE_ORGS_TAG = "[multiple orgs]"
def parse_org_slug(x: Organization | RpcOrganization | str) -> str:
if isinstance(x, str):
return x
return x.slug
org_slugs = [parse_org_slug(org) for org in orgs]
# Right now there is exactly one Integration instance shared by more than 30 orgs (the generic
# GitLab integration, at the moment shared by ~500 orgs), so 50 should be plenty for all but
# that one instance
if len(orgs) > _AMBIGUOUS_ORG_CUTOFF:
org_slugs = org_slugs[: _AMBIGUOUS_ORG_CUTOFF - 1] + [
f"... ({len(orgs) - (_AMBIGUOUS_ORG_CUTOFF - 1)} more)"
]
scope = sentry_sdk.get_isolation_scope()
# It's possible we've already set the org context with one of the orgs in our list,
# somewhere we could narrow it down to one org. In that case, we don't want to overwrite
# that specific data with this ambiguous data.
current_org_slug_tag = scope._tags.get("organization.slug")
if current_org_slug_tag and current_org_slug_tag in org_slugs:
return
# It's also possible that the org seems already to be set but it's just a case of scope
# bleed. In that case, we want to test for that and proceed.
check_tag_for_scope_bleed("organization.slug", MULTIPLE_ORGS_TAG)
scope.set_tag("organization", MULTIPLE_ORGS_TAG)
scope.set_tag("organization.slug", MULTIPLE_ORGS_TAG)
scope.set_context(
"organization", {"multiple possible": org_slugs, "source": source or "unknown"}
)
def get_trace_id():
span = sentry_sdk.get_current_span()
if span is not None:
return span.get_trace_context().get("trace_id")
return None
def set_span_attribute(data_name, value):
span = sentry_sdk.get_current_span()
if span is not None:
span.set_data(data_name, value)
def merge_context_into_scope(
context_name: str, context_data: Mapping[str, Any], scope: Scope
) -> None:
"""
Add the given context to the given scope, merging the data in if a context with the given name
already exists.
"""
existing_context = scope._contexts.setdefault(context_name, {})
existing_context.update(context_data)
__all__ = (
"LEGACY_RESOLVER",
"Scope",
"UNSAFE_FILES",
"UNSAFE_TAG",
"before_send_transaction",
"bind_ambiguous_org_context",
"bind_organization_context",
"capture_exception",
"capture_exception_with_scope_check",
"capture_message",
"check_current_scope_transaction",
"check_tag_for_scope_bleed",
"configure_sdk",
"get_options",
"get_project_key",
"get_transaction_name_from_request",
"is_current_event_safe",
"isolation_scope",
"make_transport",
"merge_context_into_scope",
"patch_transport_for_instrumentation",
"isolation_scope",
"set_current_event_project",
"traces_sampler",
)
| Dsns |
python | tensorflow__tensorflow | tensorflow/python/saved_model/saved_model_test.py | {
"start": 4286,
"end": 58558
} | class ____(SavedModelTestBase):
def _validate_assets(self,
export_dir,
asset_file_def,
expected_asset_file_name,
expected_asset_file_contents,
expected_asset_tensor_name,
asset_id=0):
assets_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes(expected_asset_file_name))
actual_asset_contents = file_io.read_file_to_string(assets_path)
self.assertEqual(expected_asset_file_contents,
compat.as_text(actual_asset_contents))
self.assertEqual(expected_asset_file_name,
asset_file_def[asset_id].filename)
self.assertEqual(expected_asset_tensor_name,
asset_file_def[asset_id].tensor_info.name)
def _validate_inputs_tensor_info_fail(self, builder, tensor_info):
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def({
"foo_inputs": tensor_info
}, dict(), "foo")
self.assertRaises(
AssertionError,
builder.add_meta_graph_and_variables,
sess, ["foo"],
signature_def_map={"foo_key": foo_signature})
def _validate_inputs_tensor_info_accept(self, builder, tensor_info):
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def(
{"foo_inputs": tensor_info}, dict(), "foo")
builder.add_meta_graph_and_variables(
sess, ["foo"], signature_def_map={"foo_key": foo_signature})
def _validate_outputs_tensor_info_fail(self, builder, tensor_info):
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def(
dict(), {"foo_outputs": tensor_info}, "foo")
self.assertRaises(
AssertionError,
builder.add_meta_graph_and_variables,
sess, ["foo"],
signature_def_map={"foo_key": foo_signature})
def _validate_outputs_tensor_info_accept(self, builder, tensor_info):
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def(
dict(), {"foo_outputs": tensor_info}, "foo")
builder.add_meta_graph_and_variables(
sess, ["foo"], signature_def_map={"foo_key": foo_signature})
def _validate_sig_def_keys(self, builder, valid_tensor_info, invalid_key):
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
foo_signature = signature_def_utils.build_signature_def(
dict(), {"foo_key": valid_tensor_info}, "foo")
self.assertRaises(
KeyError,
builder.add_meta_graph_and_variables,
sess, ["foo"],
signature_def_map={invalid_key: foo_signature})
def testMaybeSavedModelDir(self):
base_path = test.test_src_dir_path("/python/saved_model")
self.assertFalse(loader.maybe_saved_model_directory(base_path))
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
self.assertTrue(loader.maybe_saved_model_directory(base_path))
base_path = "complete_garbage"
self.assertFalse(loader.maybe_saved_model_directory(base_path))
def testBadSavedModelFileFormat(self):
export_dir = self._get_export_dir("test_bad_saved_model_file_format")
# Attempt to load a SavedModel from an export directory that does not exist.
with self.session(graph=ops.Graph()) as sess:
with self.assertRaisesRegex(
IOError, "SavedModel file does not exist at: %s" % export_dir):
loader.load(sess, ["foo"], export_dir)
os.makedirs(export_dir)
# Write an invalid binary proto to saved_model.pb.
path_to_pb = os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PB)
with open(path_to_pb, "w") as f:
f.write("invalid content")
with self.session(graph=ops.Graph()) as sess:
with self.assertRaisesRegex(IOError, "Cannot parse file"):
loader.load(sess, ["foo"], export_dir)
# Cleanup the directory and start again.
file_io.delete_recursively(export_dir)
os.makedirs(export_dir)
# Write an invalid text proto to saved_model.pbtxt
path_to_pbtxt = os.path.join(export_dir,
constants.SAVED_MODEL_FILENAME_PBTXT)
with open(path_to_pbtxt, "w") as f:
f.write("invalid content")
with self.session(graph=ops.Graph()) as sess:
with self.assertRaisesRegex(
IOError,
"Cannot parse file.*%s" % constants.SAVED_MODEL_FILENAME_PBTXT):
loader.load(sess, ["foo"], export_dir)
def testVerifySessionGraphUsage(self):
export_dir = self._get_export_dir("test_verify_session_graph_usage")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with ops.Graph().as_default():
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
# Save the SavedModel to disk.
builder.save()
# Build a session and supply it to the load operation.
sess = session.Session(graph=ops.Graph())
loader.load(sess, [tag_constants.TRAINING], export_dir)
# Check the variable within the scope of the session and its graph.
with sess:
self.assertEqual(
42,
self._eval(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0]))
def testSequence(self):
export_dir = self._get_export_dir("test_sequence")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with ops.Graph().as_default():
# Expect an assertion error since add_meta_graph_and_variables() should be
# invoked before any add_meta_graph() calls.
with self.session(graph=ops.Graph()) as sess:
self.assertRaises(AssertionError, builder.add_meta_graph, ["foo"])
# Expect an assertion error for multiple calls of
# add_meta_graph_and_variables() since weights should be saved exactly
# once.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, ["bar"])
self.assertRaises(AssertionError, builder.add_meta_graph_and_variables,
sess, ["baz"])
def testTags(self):
export_dir = self._get_export_dir("test_tags")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with ops.Graph().as_default():
# Graph with a single variable. SavedModel invoked to:
# - add with weights.
# - a single tag (from predefined constants).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
# Graph that updates the single variable. SavedModel invoked to:
# - simply add the model (weights are not updated).
# - a single tag (from predefined constants).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 43)
builder.add_meta_graph([tag_constants.SERVING])
# Graph that updates the single variable. SavedModel invoked to:
# - simply add the model (weights are not updated).
# - multiple tags (from predefined constants).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 45)
builder.add_meta_graph([tag_constants.SERVING, tag_constants.GPU])
# Graph that updates the single variable. SavedModel invoked to:
# - simply add the model (weights are not updated).
# - multiple tags (from predefined constants for serving on TPU).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 45)
builder.add_meta_graph([tag_constants.SERVING, tag_constants.TPU])
# Graph that updates the single variable. SavedModel is invoked:
# - to add the model (weights are not updated).
# - multiple custom tags.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 44)
builder.add_meta_graph(["foo", "bar"])
# Save the SavedModel to disk.
builder.save()
# Restore the graph with a single predefined tag whose variables were
# saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, [tag_constants.TRAINING], export_dir)
self.assertEqual(
42,
self._eval(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0]))
# Restore the graph with a single predefined tag whose variables were not
# saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
self.assertEqual(
42,
self._eval(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0]))
# Restore the graph with multiple predefined tags whose variables were not
# saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, [tag_constants.SERVING, tag_constants.GPU],
export_dir)
self.assertEqual(
42,
self._eval(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0]))
# Restore the graph with multiple predefined tags (for serving on TPU)
# whose variables were not saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, [tag_constants.SERVING, tag_constants.TPU],
export_dir)
self.assertEqual(
42,
self._eval(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0]))
# Restore the graph with multiple tags. Provide duplicate tags to test set
# semantics.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo", "bar", "foo"], export_dir)
self.assertEqual(
42,
self._eval(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0]))
# Try restoring a graph with a non-existent tag. This should yield a
# runtime error.
with self.session(graph=ops.Graph()) as sess:
self.assertRaises(RuntimeError, loader.load, sess, ["INVALID"],
export_dir)
# Try restoring a graph where a subset of the tags match. Since tag
# matching for meta graph defs follows "all" semantics, this should yield
# a runtime error.
with self.session(graph=ops.Graph()) as sess:
self.assertRaises(RuntimeError, loader.load, sess, ["foo", "baz"],
export_dir)
def testVariables(self):
export_dir = self._get_export_dir("test_variables")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with ops.Graph().as_default():
# Graph with two variables. SavedModel invoked to:
# - add with weights.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v1", 1)
self._init_and_validate_variable(sess, "v2", 2)
builder.add_meta_graph_and_variables(sess, ["foo"])
# Graph with a single variable (subset of the variables from the previous
# graph whose weights were saved). SavedModel invoked to:
# - simply add the model (weights are not updated).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v2", 3)
builder.add_meta_graph(["bar"])
# Graph with a single variable (disjoint set of variables from the
# previous graph whose weights were saved). SavedModel invoked to:
# - simply add the model (weights are not updated).
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v3", 4)
builder.add_meta_graph(["baz"])
# Save the SavedModel to disk.
builder.save()
# Restore the graph with tag "foo", whose variables were saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertEqual(len(collection_vars), 2)
self.assertEqual(1, self._eval(collection_vars[0]))
self.assertEqual(2, self._eval(collection_vars[1]))
# Restore the graph with tag "bar", whose variables were not saved. Only
# the subset of the variables added to the graph will be restored with the
# checkpointed value.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["bar"], export_dir)
collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertEqual(len(collection_vars), 1)
self.assertEqual(2, self._eval(collection_vars[0]))
# Try restoring the graph with tag "baz", whose variables were not saved.
# Since this graph has a disjoint set of variables from the set that was
# saved, this should raise an error.
with self.session(graph=ops.Graph()) as sess:
self.assertRaises(errors.NotFoundError, loader.load, sess, ["baz"],
export_dir)
def testGraphWithoutVariables(self):
export_dir = self._get_export_dir("test_graph_has_variables")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with ops.Graph().as_default():
# Graph with no variables.
with self.session(graph=ops.Graph()) as sess:
constant_5_name = constant_op.constant(5.0).name
builder.add_meta_graph_and_variables(sess, ["foo"])
# Second graph with no variables
with self.session(graph=ops.Graph()) as sess:
constant_6_name = constant_op.constant(6.0).name
builder.add_meta_graph(["bar"])
# Save the SavedModel to disk.
builder.save()
# Restore the graph with tag "foo".
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
# Read the constant a from the graph.
a = ops.get_default_graph().get_tensor_by_name(constant_5_name)
b = constant_op.constant(6.0)
c = a * b
self.assertEqual(30.0, self.evaluate(c))
# Restore the graph with tag "bar".
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["bar"], export_dir)
# Read the constant a from the graph.
a = ops.get_default_graph().get_tensor_by_name(constant_6_name)
b = constant_op.constant(5.0)
c = a * b
self.assertEqual(30.0, self.evaluate(c))
def testNoOverwrite(self):
export_dir = self._get_export_dir("test_no_overwrite")
builder = saved_model_builder._SavedModelBuilder(export_dir)
with ops.Graph().as_default():
# Graph with a single variable. SavedModel invoked to:
# - add with weights.
with self.session(graph=ops.Graph()) as sess:
self._init_and_validate_variable(sess, "v", 42)
builder.add_meta_graph_and_variables(sess, ["foo"])
# Save the SavedModel to disk in text format.
builder.save(as_text=True)
# Restore the graph with tag "foo", whose variables were saved.
with self.session(graph=ops.Graph()) as sess:
loader.load(sess, ["foo"], export_dir)
self.assertEqual(42, self._eval("v"))
# An attempt to create another builder with the same export directory
# should result in an assertion error.
self.assertRaises(AssertionError, saved_model_builder._SavedModelBuilder,
export_dir)
  def testSaveAsText(self):
    """Text-format save round-trips; only the first add writes weights."""
    export_dir = self._get_export_dir("test_astext")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default():
      # Graph with a single variable. SavedModel invoked to:
      # - add with weights.
      with self.session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)
        builder.add_meta_graph_and_variables(sess, ["foo"])

      # Graph with the same single variable. SavedModel invoked to:
      # - simply add the model (weights are not updated).
      with self.session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 43)
        builder.add_meta_graph(["bar"])

      # Save the SavedModel to disk in text format.
      builder.save(as_text=True)

      # Restore the graph with tag "foo", whose variables were saved.
      with self.session(graph=ops.Graph()) as sess:
        loader.load(sess, ["foo"], export_dir)
        self.assertEqual(
            42,
            self._eval(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0]))

      # Restore the graph with tag "bar", whose variables were not saved.
      # The checkpoint written for "foo" (value 42) is what gets restored.
      with self.session(graph=ops.Graph()) as sess:
        loader.load(sess, ["bar"], export_dir)
        self.assertEqual(
            42,
            self._eval(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0]))
  def testCollections(self):
    """Collection membership is recorded per meta graph and restored per tag."""
    export_dir = self._get_export_dir("test_collections")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default():
      # Graph with a single variable added to a collection. SavedModel invoked
      # to:
      # - add with weights.
      with self.session(graph=ops.Graph()) as sess:
        v = variable_v1.VariableV1(42, name="v")
        ops.add_to_collection("foo_vars", v)
        self.evaluate(variables.global_variables_initializer())
        self.assertEqual(42, self.evaluate(v))
        builder.add_meta_graph_and_variables(sess, ["foo"])

      # Graph with the same single variable added to a different collection.
      # SavedModel invoked to:
      # - simply add the model (weights are not updated).
      with self.session(graph=ops.Graph()) as sess:
        v = variable_v1.VariableV1(43, name="v")
        ops.add_to_collection("bar_vars", v)
        self.evaluate(variables.global_variables_initializer())
        self.assertEqual(43, self.evaluate(v))
        builder.add_meta_graph(["bar"])

      # Save the SavedModel to disk.
      builder.save()

      # Restore the graph with tag "foo", whose variables were saved. The
      # collection 'foo_vars' should contain a single element. The collection
      # 'bar_vars' should not be found.
      with self.session(graph=ops.Graph()) as sess:
        loader.load(sess, ["foo"], export_dir)
        collection_foo_vars = ops.get_collection("foo_vars")
        self.assertEqual(len(collection_foo_vars), 1)
        self.assertEqual(42, self._eval(collection_foo_vars[0]))

        self.assertEqual(len(ops.get_collection("bar_vars")), 0)

      # Restore the graph with tag "bar", whose variables were not saved. The
      # collection-def exported as part of the meta graph def is updated to
      # reflect the new collection. The value of the variable in the
      # collection-def corresponds to the saved value (from the previous graph
      # with tag "foo").
      with self.session(graph=ops.Graph()) as sess:
        loader.load(sess, ["bar"], export_dir)
        collection_bar_vars = ops.get_collection("bar_vars")
        self.assertEqual(len(collection_bar_vars), 1)
        self.assertEqual(42, self._eval(collection_bar_vars[0]))

        self.assertEqual(len(ops.get_collection("foo_vars")), 0)
  def testSignatureDefs(self):
    """SignatureDef maps are stored per meta graph; later adds may reuse keys."""
    export_dir = self._get_export_dir("test_signature_defs")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default():
      # Graph with a single variable and a single entry in the signature def
      # map. SavedModel is invoked to add with weights.
      with self.session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)
        # Build and populate an empty SignatureDef for testing.
        foo_signature = signature_def_utils.build_signature_def(
            dict(), dict(), "foo")
        builder.add_meta_graph_and_variables(
            sess, ["foo"], signature_def_map={"foo_key": foo_signature})

      # Graph with the same single variable and multiple entries in the
      # signature def map. No weights are saved by SavedModel.
      with self.session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 43)
        # Build and populate a different SignatureDef for testing.
        bar_signature = signature_def_utils.build_signature_def(
            dict(), dict(), "bar")
        # Also, build a different SignatureDef corresponding to "foo_key"
        # defined in the previous graph.
        foo_new_signature = signature_def_utils.build_signature_def(
            dict(), dict(), "foo_new")
        builder.add_meta_graph(["bar"],
                               signature_def_map={
                                   "bar_key": bar_signature,
                                   "foo_key": foo_new_signature
                               })

      # Save the SavedModel to disk.
      builder.save()

      # Restore the graph with tag "foo". The single entry in the SignatureDef
      # map corresponding to "foo_key" should exist.
      with self.session(graph=ops.Graph()) as sess:
        foo_graph = loader.load(sess, ["foo"], export_dir)
        self.assertEqual(
            42,
            self._eval(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0]))

        foo_signature = foo_graph.signature_def
        self.assertEqual(len(foo_signature), 1)
        self.assertEqual("foo", foo_signature["foo_key"].method_name)

      # Restore the graph with tag "bar". The SignatureDef map should have two
      # entries. One corresponding to "bar_key" and another corresponding to the
      # new value of "foo_key".
      with self.session(graph=ops.Graph()) as sess:
        bar_graph = loader.load(sess, ["bar"], export_dir)
        self.assertEqual(
            42,
            self._eval(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0]))

        bar_signature = bar_graph.signature_def
        self.assertEqual(len(bar_signature), 2)
        self.assertEqual("bar", bar_signature["bar_key"].method_name)
        self.assertEqual("foo_new", bar_signature["foo_key"].method_name)
def testSignatureDefValidationFails(self):
export_dir = self._get_export_dir("test_signature_def_validation_fail")
builder = saved_model_builder._SavedModelBuilder(export_dir)
tensor_without_encoding = meta_graph_pb2.TensorInfo()
tensor_without_encoding.dtype = types_pb2.DT_FLOAT
self._validate_inputs_tensor_info_fail(builder, tensor_without_encoding)
self._validate_outputs_tensor_info_fail(builder, tensor_without_encoding)
tensor_without_dtype = meta_graph_pb2.TensorInfo()
tensor_without_dtype.name = "x"
self._validate_inputs_tensor_info_fail(builder, tensor_without_dtype)
self._validate_outputs_tensor_info_fail(builder, tensor_without_dtype)
tensor_empty = meta_graph_pb2.TensorInfo()
self._validate_inputs_tensor_info_fail(builder, tensor_empty)
self._validate_outputs_tensor_info_fail(builder, tensor_empty)
valid_tensor_info = meta_graph_pb2.TensorInfo()
valid_tensor_info.name = "foo"
valid_tensor_info.dtype = types_pb2.DT_FLOAT
self._validate_sig_def_keys(builder, valid_tensor_info,
constants.INIT_OP_SIGNATURE_KEY)
self._validate_sig_def_keys(builder, valid_tensor_info,
constants.TRAIN_OP_SIGNATURE_KEY)
def testSignatureDefValidationSucceedsWithName(self):
tensor_with_name = meta_graph_pb2.TensorInfo()
tensor_with_name.name = "foo"
tensor_with_name.dtype = types_pb2.DT_FLOAT
with ops.Graph().as_default():
export_dir = self._get_export_dir("test_signature_def_validation_name_1")
builder = saved_model_builder._SavedModelBuilder(export_dir)
self._validate_inputs_tensor_info_accept(builder, tensor_with_name)
export_dir = self._get_export_dir("test_signature_def_validation_name_2")
builder = saved_model_builder._SavedModelBuilder(export_dir)
self._validate_outputs_tensor_info_accept(builder, tensor_with_name)
  def testSignatureDefValidationSucceedsWithCoo(self):
    """A TensorInfo with a coo_sparse encoding and dtype passes validation."""
    with ops.Graph().as_default():
      tensor_with_coo = meta_graph_pb2.TensorInfo()
      # TODO(soergel) test validation of each of the fields of coo_sparse
      tensor_with_coo.coo_sparse.values_tensor_name = "foo"
      tensor_with_coo.dtype = types_pb2.DT_FLOAT

      export_dir = self._get_export_dir("test_signature_def_validation_coo_1")
      builder = saved_model_builder._SavedModelBuilder(export_dir)
      self._validate_inputs_tensor_info_accept(builder, tensor_with_coo)

      export_dir = self._get_export_dir("test_signature_def_validation_coo_2")
      builder = saved_model_builder._SavedModelBuilder(export_dir)
      self._validate_outputs_tensor_info_accept(builder, tensor_with_coo)
  def testSignatureDefValidationSucceedsWithRagged(self):
    """TensorInfo built from a RaggedTensor passes signature validation."""
    with ops.Graph().as_default():
      ragged_tensor = ragged_factory_ops.constant([[1, 2], [3]])
      tensor_with_ragged = utils.build_tensor_info(ragged_tensor)

      export_dir = self._get_export_dir(
          "test_signature_def_validation_ragged_1")
      builder = saved_model_builder._SavedModelBuilder(export_dir)
      self._validate_inputs_tensor_info_accept(builder, tensor_with_ragged)

      export_dir = self._get_export_dir(
          "test_signature_def_validation_ragged_2")
      builder = saved_model_builder._SavedModelBuilder(export_dir)
      self._validate_outputs_tensor_info_accept(builder, tensor_with_ragged)
  def testAssets(self):
    """Only files passed via assets_list are copied into the export."""
    export_dir = self._get_export_dir("test_assets")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default():
      with self.session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)

        # Build an asset collection.
        ignored_filepath = os.path.join(
            compat.as_bytes(test.get_temp_dir()),
            compat.as_bytes("ignored.txt"))
        file_io.write_string_to_file(ignored_filepath, "will be ignored")

        asset_list = self._build_asset_collection("hello42.txt", "foo bar baz",
                                                  "asset_file_tensor")

        builder.add_meta_graph_and_variables(
            sess, ["foo"], assets_list=asset_list)

      # Save the SavedModel to disk.
      builder.save()

      with self.session(graph=ops.Graph()) as sess:
        foo_graph = loader.load(sess, ["foo"], export_dir)
        self._validate_assets(export_dir, foo_graph.asset_file_def,
                              "hello42.txt", "foo bar baz",
                              "asset_file_tensor:0")

        # The file written above was never added to assets_list, so it must
        # not appear in the export's assets directory.
        ignored_asset_path = os.path.join(
            compat.as_bytes(export_dir),
            compat.as_bytes(constants.ASSETS_DIRECTORY),
            compat.as_bytes("ignored.txt"))
        self.assertFalse(file_io.file_exists(ignored_asset_path))
  def testAssetsNameCollisionDiffFile(self):
    """Same basename, different contents: second asset is stored as *_1."""
    export_dir = self._get_export_dir("test_assets_name_collision_diff_file")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default():
      with self.session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)

        asset_list = self._build_asset_collection(
            "hello42.txt", "foo bar bak", "asset_file_tensor", asset_subdir="1")

        asset_list = self._build_asset_collection(
            "hello42.txt",
            "foo bar baz",
            "asset_file_tensor_1",
            asset_subdir="2")

        builder.add_meta_graph_and_variables(
            sess, ["foo"], assets_list=asset_list)

      # Save the SavedModel to disk.
      builder.save()

      with self.session(graph=ops.Graph()) as sess:
        foo_graph = loader.load(sess, ["foo"], export_dir)
        self._validate_assets(export_dir, foo_graph.asset_file_def,
                              "hello42.txt", "foo bar bak",
                              "asset_file_tensor:0")
        # The colliding second file is renamed with a "_1" suffix on disk.
        self._validate_assets(
            export_dir,
            foo_graph.asset_file_def,
            "hello42.txt_1",
            "foo bar baz",
            "asset_file_tensor_1:0",
            asset_id=1)
  def testAssetsNameCollisionSameFilepath(self):
    """Same file added twice: recorded twice but stored on disk only once."""
    export_dir = self._get_export_dir("test_assets_name_collision_same_path")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default():
      with self.session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)

        asset_list = self._build_asset_collection("hello42.txt", "foo bar baz",
                                                  "asset_file_tensor")
        asset_list = self._build_asset_collection("hello42.txt", "foo bar baz",
                                                  "asset_file_tensor_1")

        builder.add_meta_graph_and_variables(
            sess, ["foo"], assets_list=asset_list)

      # Save the SavedModel to disk.
      builder.save()

      with self.session(graph=ops.Graph()) as sess:
        foo_graph = loader.load(sess, ["foo"], export_dir)
        self._validate_assets(export_dir, foo_graph.asset_file_def,
                              "hello42.txt", "foo bar baz",
                              "asset_file_tensor:0")
        # The second tensor should be recorded, but the same.
        self._validate_assets(
            export_dir,
            foo_graph.asset_file_def,
            "hello42.txt",
            "foo bar baz",
            "asset_file_tensor_1:0",
            asset_id=1)
        # No "_1"-suffixed copy should have been written.
        ignored_asset_path = os.path.join(
            compat.as_bytes(export_dir),
            compat.as_bytes(constants.ASSETS_DIRECTORY),
            compat.as_bytes("hello42.txt_1"))
        self.assertFalse(file_io.file_exists(ignored_asset_path))
  def testAssetsNameCollisionSameFile(self):
    """Different paths, identical contents: deduplicated to one stored asset."""
    export_dir = self._get_export_dir("test_assets_name_collision_same_file")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default():
      with self.session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)

        asset_list = self._build_asset_collection(
            "hello42.txt", "foo bar baz", "asset_file_tensor", asset_subdir="1")

        asset_list = self._build_asset_collection(
            "hello42.txt",
            "foo bar baz",
            "asset_file_tensor_1",
            asset_subdir="2")

        builder.add_meta_graph_and_variables(
            sess, ["foo"], assets_list=asset_list)

      # Save the SavedModel to disk.
      builder.save()

      with self.session(graph=ops.Graph()) as sess:
        foo_graph = loader.load(sess, ["foo"], export_dir)
        self._validate_assets(export_dir, foo_graph.asset_file_def,
                              "hello42.txt", "foo bar baz",
                              "asset_file_tensor:0")
        # The second tensor should be recorded, but the same.
        self._validate_assets(
            export_dir,
            foo_graph.asset_file_def,
            "hello42.txt",
            "foo bar baz",
            "asset_file_tensor_1:0",
            asset_id=1)
        # No "_1"-suffixed copy should have been written.
        ignored_asset_path = os.path.join(
            compat.as_bytes(export_dir),
            compat.as_bytes(constants.ASSETS_DIRECTORY),
            compat.as_bytes("hello42.txt_1"))
        self.assertFalse(file_io.file_exists(ignored_asset_path))
  def testAssetsNameCollisionManyFiles(self):
    """Five distinct files sharing one basename get uniquified suffixes."""
    export_dir = self._get_export_dir("test_assets_name_collision_many_files")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default():
      with self.session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)

        # Each iteration adds another "hello42.txt" with distinct contents;
        # asset_list accumulates across calls (the last value is passed below).
        for i in range(5):
          idx = str(i)
          asset_list = self._build_asset_collection(
              "hello42.txt",
              "foo bar baz " + idx,
              "asset_file_tensor_" + idx,
              asset_subdir=idx)

        builder.add_meta_graph_and_variables(
            sess, ["foo"], assets_list=asset_list)

      # Save the SavedModel to disk.
      builder.save()

      with self.session(graph=ops.Graph()) as sess:
        foo_graph = loader.load(sess, ["foo"], export_dir)
        # Files 1..4 are disambiguated as hello42.txt_1 .. hello42.txt_4.
        for i in range(1, 5):
          idx = str(i)
          self._validate_assets(
              export_dir,
              foo_graph.asset_file_def,
              "hello42.txt_" + idx,
              "foo bar baz " + idx,
              "asset_file_tensor_{}:0".format(idx),
              asset_id=i)

        # The first file keeps the unsuffixed name.
        self._validate_assets(export_dir, foo_graph.asset_file_def,
                              "hello42.txt", "foo bar baz 0",
                              "asset_file_tensor_0:0")
  def testCustomInitOp(self):
    """A custom init_op runs on load and re-derives v3 = v1 + v2."""
    # NOTE(review): directory name says "main_op" though the test exercises a
    # custom init_op — confirm whether the name is intentional.
    export_dir = self._get_export_dir("test_main_op")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default():
      with self.session(graph=ops.Graph()) as sess:
        # Add `v1` and `v2` variables to the graph.
        v1 = variable_v1.VariableV1(1, name="v1")
        v2 = variable_v1.VariableV1(2, name="v2")

        # Initialize another variable `v3` to 42.
        v3 = variable_v1.VariableV1(42, name="v3")

        # Set up an assignment op to be run as part of the main_op.
        with ops.control_dependencies([main_op.main_op()]):
          add_v1_v2 = math_ops.add(v1, v2)
          custom_init_op = control_flow_ops.group(
              state_ops.assign(v3, add_v1_v2))

        self.evaluate(variables.global_variables_initializer())
        self.evaluate(custom_init_op)
        builder.add_meta_graph_and_variables(
            sess, ["foo"], init_op=custom_init_op)

      # Save the SavedModel to disk.
      builder.save()

      with self.session(graph=ops.Graph()) as sess:
        loader.load(sess, ["foo"], export_dir)
        self.assertEqual(1, self._eval("v1"))
        self.assertEqual(2, self._eval("v2"))
        # Evaluates to the sum of the first two variables and assigned as part
        # of the main_op, following a restore.
        self.assertEqual(3, self._eval("v3"))
  def testTrainOp(self):
    """A train_op is recorded in the meta graph and retrievable after load."""
    export_dir = self._get_export_dir("test_train_op")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default():
      with self.session(graph=ops.Graph()) as sess:
        # Add `v1` and `v2` variables to the graph.
        v1 = variable_v1.VariableV1(1, name="v1")
        v2 = variable_v1.VariableV1(2, name="v2")
        self.evaluate(variables.global_variables_initializer())

        train_op = state_ops.assign_add(v1, v2)
        self.evaluate(train_op)
        builder.add_meta_graph_and_variables(sess, ["foo"], train_op=train_op)

      # Save the SavedModel to disk.
      builder.save()

      with self.session(graph=ops.Graph()) as sess:
        meta_graph_def = loader.load(sess, ["foo"], export_dir)
        # v1 was bumped by the train op before saving; v2 is untouched.
        self.assertEqual(3, self._eval("v1"))
        self.assertEqual(2, self._eval("v2"))
        # Resource variables surface the train op as an op (checked by type
        # name); reference variables surface it as a Tensor.
        if resource_variables_toggle.resource_variables_enabled():
          self.assertEqual(
              loader_impl.get_train_op(meta_graph_def).type,
              "AssignAddVariableOp")
        else:
          self.assertIsInstance(
              loader_impl.get_train_op(meta_graph_def), tensor_lib.Tensor)
  def testTrainOpGroup(self):
    """A tf.group() train_op round-trips as an Operation."""
    export_dir = self._get_export_dir("test_train_op_group")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default():
      with self.session(graph=ops.Graph()) as sess:
        # Add `v1` and `v2` variables to the graph.
        variable_v1.VariableV1(1, name="v1")
        variable_v1.VariableV1(2, name="v2")
        self.evaluate(variables.global_variables_initializer())

        # An empty group: a no-op train op.
        train_op = control_flow_ops.group()
        self.evaluate(train_op)
        builder.add_meta_graph_and_variables(sess, ["foo"], train_op=train_op)

      # Save the SavedModel to disk.
      builder.save()

      with self.session(graph=ops.Graph()) as sess:
        meta_graph_def = loader.load(sess, ["foo"], export_dir)
        self.assertEqual(1, self._eval("v1"))
        self.assertEqual(2, self._eval("v2"))
        self.assertIsInstance(
            loader_impl.get_train_op(meta_graph_def), ops.Operation)
  def testTrainOpAfterVariables(self):
    """A train_op added via add_meta_graph applies only to that tag."""
    export_dir = self._get_export_dir("test_train_op_after_variables")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default():
      with self.session(graph=ops.Graph()) as sess:
        # Add `v1` and `v2` variables to the graph.
        v1 = variable_v1.VariableV1(1, name="v1")
        v2 = variable_v1.VariableV1(2, name="v2")
        self.evaluate(variables.global_variables_initializer())
        builder.add_meta_graph_and_variables(sess, ["pre_foo"])

        train_op = state_ops.assign_add(v1, v2)
        self.evaluate(train_op)
        builder.add_meta_graph(["foo"], train_op=train_op)

      # Save the SavedModel to disk.
      builder.save()

      with self.session(graph=ops.Graph()) as sess:
        meta_graph_def = loader.load(sess, ["foo"], export_dir)
        # Resource variables surface the train op as an op; reference
        # variables surface it as a Tensor.
        if resource_variables_toggle.resource_variables_enabled():
          self.assertEqual(
              loader_impl.get_train_op(meta_graph_def).type,
              "AssignAddVariableOp")
        else:
          self.assertIsInstance(
              loader_impl.get_train_op(meta_graph_def), tensor_lib.Tensor)

      # The meta graph saved before the train op was added must not carry one.
      with self.session(graph=ops.Graph()) as sess:
        loader.load(sess, ["pre_foo"], export_dir)
        self.assertFalse(ops.get_collection(constants.TRAIN_OP_KEY))
  def testMultipleAssets(self):
    """Each meta graph carries and restores its own asset collection."""
    export_dir = self._get_export_dir("test_multiple_assets")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default():
      with self.session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)

        # Build an asset collection specific to `foo` graph.
        asset_list = self._build_asset_collection("foo.txt", "content_foo",
                                                  "asset_file_tensor")

        # Add the asset collection as part of the graph with tag "foo".
        builder.add_meta_graph_and_variables(
            sess, ["foo"], assets_list=asset_list)

      with self.session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)

        # Build an asset collection specific to `bar` graph.
        asset_list = self._build_asset_collection("bar.txt", "content_bar",
                                                  "asset_file_tensor")

        # Add the asset collection as part of the graph with tag "bar".
        builder.add_meta_graph(["bar"], assets_list=asset_list)

      # Save the SavedModel to disk.
      builder.save()

      # Check assets restored for graph with tag "foo".
      with self.session(graph=ops.Graph()) as sess:
        foo_graph = loader.load(sess, ["foo"], export_dir)
        self._validate_assets(export_dir, foo_graph.asset_file_def, "foo.txt",
                              "content_foo", "asset_file_tensor:0")

      # Check assets restored for graph with tag "bar".
      with self.session(graph=ops.Graph()) as sess:
        bar_graph = loader.load(sess, ["bar"], export_dir)
        self._validate_assets(export_dir, bar_graph.asset_file_def, "bar.txt",
                              "content_bar", "asset_file_tensor:0")
  def testDuplicateAssets(self):
    """An asset name reused across meta graphs keeps the first file's content."""
    export_dir = self._get_export_dir("test_duplicate_assets")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default():
      with self.session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)

        # Build an asset collection with `foo.txt` that has `foo` specific
        # content.
        asset_list = self._build_asset_collection("foo.txt", "content_foo",
                                                  "asset_file_tensor")

        # Add the asset collection as part of the graph with tag "foo".
        builder.add_meta_graph_and_variables(
            sess, ["foo"], assets_list=asset_list)

      with self.session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)

        # Build an asset collection with `foo.txt` that has `bar` specific
        # content.
        asset_list = self._build_asset_collection("foo.txt", "content_bar",
                                                  "asset_file_tensor")

        # Add the asset collection as part of the graph with tag "bar".
        builder.add_meta_graph(["bar"], assets_list=asset_list)

      # Save the SavedModel to disk.
      builder.save()

      # Check assets restored for graph with tag "foo".
      with self.session(graph=ops.Graph()) as sess:
        foo_graph = loader.load(sess, ["foo"], export_dir)
        self._validate_assets(export_dir, foo_graph.asset_file_def, "foo.txt",
                              "content_foo", "asset_file_tensor:0")

      # Check assets restored for graph with tag "bar".
      with self.session(graph=ops.Graph()) as sess:
        bar_graph = loader.load(sess, ["bar"], export_dir)

        # Validate the assets for `bar` graph. `foo.txt` should contain the
        # original contents corresponding to `foo` graph since an asset with the
        # same name across multiple graphs is only stored the first time
        self._validate_assets(export_dir, bar_graph.asset_file_def, "foo.txt",
                              "content_foo", "asset_file_tensor:0")
  def testOp(self):
    """Ops pinned to two CPU devices save/restore; a named init op re-derives v3."""
    export_dir = self._get_export_dir("test_op")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default():
      with session.Session(
          graph=ops.Graph(),
          config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
        with sess.graph.device("/cpu:0"):
          v1 = variable_v1.VariableV1(1, name="v1")

        with sess.graph.device("/cpu:1"):
          v2 = variable_v1.VariableV1(2, name="v2")

        # v3 is an unsaved variable derived from v1 and v2. It is used to
        # exercise the ability to run an init op when restoring a graph.
        # collections=[] keeps it out of GLOBAL_VARIABLES so it is not saved.
        v3 = variable_v1.VariableV1(
            1, name="v3", trainable=False, collections=[])
        assign_v3 = state_ops.assign(v3, math_ops.add(v1, v2))
        # Grouped under a stable name so it can be run by name after restore.
        control_flow_ops.group(assign_v3, name="init_op")

        self.evaluate(variables.global_variables_initializer())
        self.assertEqual(1, self._eval("v1"))
        self.assertEqual(2, self._eval("v2"))

        builder.add_meta_graph_and_variables(sess, ["foo"])

      # Save the SavedModel to disk.
      builder.save()

      with session.Session(
          graph=ops.Graph(),
          config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
        loader.load(sess, ["foo"], export_dir)

        # Validate variables, run the init op and verify result.
        self.assertEqual(1, self._eval("v1"))
        self.assertEqual(2, self._eval("v2"))
        sess.run("init_op")
        self.assertEqual(3, self._eval("v3"))
  def testCustomSaveable(self):
    """A custom SaveableObject (key-value table) survives save/restore."""
    export_dir = self._get_export_dir("custom_saveable")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with session.Session(
        graph=ops.Graph(),
        config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
      # CheckpointedOp is a key-value table that can be saved across sessions.
      # The table register itself in SAVEABLE_OBJECTS collection.
      v1 = saver_test_utils.CheckpointedOp(name="v1")
      self.evaluate(variables.global_variables_initializer())
      v1.insert("k1", 3.0).run()
      # Once the table is restored, we can access it through this reference.
      ops.add_to_collection("table_ref", v1.table_ref)
      builder.add_meta_graph_and_variables(sess, ["foo"])

    # Save the SavedModel to disk.
    builder.save()

    with session.Session(
        graph=ops.Graph(),
        config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
      loader.load(sess, ["foo"], export_dir)

      # Instantiate a wrapper object from the checkpointed reference.
      v1 = saver_test_utils.CheckpointedOp(
          name="v1", table_ref=ops.get_collection("table_ref")[0])
      self.assertEqual(b"k1", v1.keys().eval())
      self.assertEqual(3.0, v1.values().eval())
  def testCustomSaver(self):
    """An explicitly supplied Saver's restore op is what the SavedModel records."""
    export_dir = self._get_export_dir("test_custom_saver")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default() as graph:
      with self.session(graph=ops.Graph()) as sess:
        variable_v1.VariableV1(1, name="v1")
        self.evaluate(variables.global_variables_initializer())
        custom_saver = training.Saver(name="my_saver")
        builder.add_meta_graph_and_variables(sess, ["tag"], saver=custom_saver)

      # Save the SavedModel to disk.
      builder.save()

      # Load into the outer graph so its op names can be inspected directly.
      with self.session(graph=graph) as sess:
        saved_graph = loader.load(sess, ["tag"], export_dir)
        graph_ops = [x.name for x in graph.get_operations()]
        # Only the custom saver's ops exist; no default "save/..." saver ops.
        self.assertTrue("my_saver/restore_all" in graph_ops)
        self.assertFalse("save/restore_all" in graph_ops)
        self.assertEqual(
            saved_graph.saver_def.restore_op_name, "my_saver/restore_all")
  def testNoCustomSaver(self):
    """Without saver=..., the builder records its own default "save/..." saver."""
    export_dir = self._get_export_dir("test_no_custom_saver")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default() as graph:
      with self.session(graph=ops.Graph()) as sess:
        variable_v1.VariableV1(1, name="v1")
        self.evaluate(variables.global_variables_initializer())
        # Created but deliberately NOT passed to the builder.
        training.Saver(name="my_saver")
        builder.add_meta_graph_and_variables(sess, ["tag"])

      # Save the SavedModel to disk.
      builder.save()

      # Load into the outer graph so its op names can be inspected directly.
      with self.session(graph=graph) as sess:
        saved_graph = loader.load(sess, ["tag"], export_dir)
        graph_ops = [x.name for x in graph.get_operations()]
        # Both savers' ops are in the graph, but the recorded restore op is
        # the default saver's.
        self.assertTrue("my_saver/restore_all" in graph_ops)
        self.assertTrue("save/restore_all" in graph_ops)
        self.assertEqual(
            saved_graph.saver_def.restore_op_name, "save/restore_all")
  def testMultipleCustomSavers(self):
    """Each add_meta_graph call records its own Saver's restore op."""
    export_dir = self._get_export_dir("test_multiple_custom_savers")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default():
      with self.session(graph=ops.Graph()) as sess:
        variable_v1.VariableV1(1, name="v1")
        self.evaluate(variables.global_variables_initializer())
        builder.add_meta_graph_and_variables(sess, ["tag_0"])

        # Subsequent unnamed Savers are auto-named save_1, save_2, ... —
        # the creation order below is load-bearing for the assertions.
        saver_1 = training.Saver()
        builder.add_meta_graph(["tag_1"], saver=saver_1)

        saver_2 = training.Saver()
        builder.add_meta_graph(["tag_2"], saver=saver_2)

      # Save the SavedModel to disk.
      builder.save()

      def _validate_custom_saver(tag_name, saver_name):
        # Loads the given tag in a fresh graph and checks the recorded
        # restore op name.
        with ops.Graph().as_default() as graph:
          with self.session(graph=graph) as sess:
            saved_graph = loader.load(sess, [tag_name], export_dir)
            self.assertEqual(
                saved_graph.saver_def.restore_op_name,
                saver_name)

      _validate_custom_saver("tag_0", "save/restore_all")
      _validate_custom_saver("tag_1", "save_1/restore_all")
      _validate_custom_saver("tag_2", "save_2/restore_all")
  def testImportScope(self):
    """Loading under import_scope prefixes graph ops but not static asset data."""
    export_dir = self._get_export_dir("test_scoped_assets")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default():
      # Build a SavedModel with a variable, an asset, and a constant tensor.
      with self.session(graph=ops.Graph()) as sess:
        self._init_and_validate_variable(sess, "v", 42)
        asset_list = self._build_asset_collection("foo.txt", "content_foo",
                                                  "asset_file_tensor")
        constant_op.constant("constant value", name="constant_tensor_name")
        builder.add_meta_graph_and_variables(
            sess, ["tag_name"], assets_list=asset_list)

        # Save the asset file path for later comparison.
        asset_file_path = asset_list[0].eval()

      # Save the SavedModel to disk.
      builder.save()

      with self.session(graph=ops.Graph()) as sess:
        # Restore the SavedModel under an import_scope in a new graph/session.
        graph_proto = loader.load(
            sess, ["tag_name"], export_dir, import_scope="scope_name")

        # The loaded variable tensor should be scoped, but its contents should
        # be unchanged.
        self.assertEqual(
            "scope_name/v:0",
            ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].name)
        self.assertEqual(42, self._eval("scope_name/v"))

        # The loaded asset tensor should be scoped, but the asset file path and
        # contents should be unchanged.
        asset_list = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
        self.assertEqual(1, len(asset_list))
        self.assertEqual(asset_file_path, asset_list[0].eval())
        self.assertEqual("scope_name/asset_file_tensor:0", asset_list[0].name)

        # The static asset data inside graph_proto.collection_def should not be
        # scoped.
        self._validate_assets(export_dir, graph_proto.asset_file_def, "foo.txt",
                              "content_foo", "asset_file_tensor:0")

        # The constant tensor should be scoped, but its contents should be
        # unchanged.
        self.assertEqual(
            compat.as_bytes("constant value"),
            ops.get_default_graph().get_tensor_by_name(
                "scope_name/constant_tensor_name:0").eval())
  def testClearDevices(self):
    """clear_devices=True strips device placements from the saved graph."""
    export_dir = self._get_export_dir("test_clear_devices")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default():
      # Specify a device and save a variable.
      with session.Session(
          target="",
          config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
        with sess.graph.device("/cpu:0"):
          self._init_and_validate_variable(sess, "v", 42)
          builder.add_meta_graph_and_variables(
              sess, [tag_constants.TRAINING], clear_devices=True)

      # Save the SavedModel to disk.
      builder.save()

      # Restore the graph with a single predefined tag whose variables were
      # saved without any device information.
      with self.session(graph=ops.Graph()) as sess:
        loader.load(sess, [tag_constants.TRAINING], export_dir)
        self.assertEqual(42, self._eval("v"))
# Tests the behavior of loading SavedModels that having missing attrs or attrs
# with incorrect types.
def testInconsistentConsumerDefaultAttrs(self):
export_dir = self._get_export_dir(
"test_strip_default_attrs_no_consumer_defaults")
builder = saved_model_builder._SavedModelBuilder(export_dir)
# Add a graph with a single variable and a test op with a defaultless
# float32 attr, "test_attr".
with session.Session(graph=ops.Graph()) as sess:
variable_v1.VariableV1(1.0, dtype=dtypes.float64, name="var")
test_ops.test_attr(T=dtypes.float32, name="test_attr")
self.evaluate(variables.global_variables_initializer())
builder.add_meta_graph_and_variables(sess, ["foo"])
# Save the SavedModel to disk in text format.
builder.save(as_text=True)
# Rewrite the SavedModel to remove the T attr from "test_attr".
saved_model_file = os.path.join(
export_dir, constants.SAVED_MODEL_FILENAME_PBTXT)
with open(saved_model_file) as f:
original_saved_model = f.read()
no_attr_saved_model = original_saved_model.replace("""
attr {
key: "T"
value {
type: DT_FLOAT
}
}""", "")
with open(saved_model_file, "w") as f:
f.write(no_attr_saved_model)
# Loading the SavedModel via the loader must fail because the SavedModel
# does not have any attr values for the "TestAttr" node, and there is no
# default specified in the TestAttr OpDef.
sess = session.Session(graph=ops.Graph())
with self.assertRaisesRegex(
ValueError, "NodeDef missing attr 'T' from Op<name=TestAttr"):
loader.load(sess, ["foo"], export_dir)
# Rewrite the SavedModel to change the type of the T attr in "test_attr"
bad_type_saved_model = original_saved_model.replace("""
attr {
key: "T"
value {
type: DT_FLOAT
}
}""", """
attr {
key: "T"
value {
type: DT_DOUBLE
}
}""")
with open(saved_model_file, "w") as f:
f.write(bad_type_saved_model)
# Loading the SavedModel via the loader must fail because there is no
# OpKernel registered to handle T = double.
sess = session.Session(graph=ops.Graph())
with self.assertRaisesRegex(errors.InvalidArgumentError,
"(?s)No OpKernel was registered.*DOUBLE"):
loader.load(sess, ["foo"], export_dir)
  def testFingerprint(self):
    """The SavedModel fingerprint can be read back after save and load."""
    # Skipped unconditionally in OSS builds; body kept for internal runs.
    self.skipTest("TF1 fingerprinting disabled in OSS.")
    export_dir = self._get_export_dir("fingerprint")
    builder = saved_model_builder._SavedModelBuilder(export_dir)

    with ops.Graph().as_default():
      with self.session(graph=ops.Graph()) as sess:
        builder.add_meta_graph_and_variables(sess, ["foo"])

      # Save the SavedModel to disk.
      builder.save()

      # Restore the graph with tag "foo", whose variables were saved.
      with self.session(graph=ops.Graph()) as sess:
        loader.load(sess, ["foo"], export_dir)

      # Load the model's fingerprint.
      try:
        fingerprinting.read_fingerprint(export_dir)
      except ValueError:
        self.fail("Fingerprint read failed.")
| SavedModelTest |
python | virgili0__Virgilio | Tools/regex-bin/regexPrinter.py | {
"start": 2909,
"end": 3161
} | class ____(TreeNode):
def __init__(self):
TreeNode.__init__(self, None, None, None)
def get_printer(self, values):
def printer():
for value in values:
yield value
return printer
| NoQuantifierNode |
python | huggingface__transformers | src/transformers/models/video_llava/modeling_video_llava.py | {
"start": 4882,
"end": 5855
} | class ____(nn.Module):
def __init__(self, config: VideoLlavaConfig):
super().__init__()
# We have hidden_size * the number of vision feature layers
num_feature_layers = 1 if isinstance(config.vision_feature_layer, int) else len(config.vision_feature_layer)
self.linear_1 = nn.Linear(
config.vision_config.hidden_size * num_feature_layers,
config.text_config.hidden_size,
bias=config.multimodal_projector_bias,
)
self.act = ACT2FN[config.projector_hidden_act]
self.linear_2 = nn.Linear(
config.text_config.hidden_size, config.text_config.hidden_size, bias=config.multimodal_projector_bias
)
def forward(self, image_features):
hidden_states = self.linear_1(image_features)
hidden_states = self.act(hidden_states)
hidden_states = self.linear_2(hidden_states)
return hidden_states
@auto_docstring
| VideoLlavaMultiModalProjector |
python | ApeWorX__ape | src/ape_cache/models.py | {
"start": 2096,
"end": 2745
} | class ____(Base):
__tablename__ = "contract_events" # type: ignore
id = Column(Integer, primary_key=True, index=True)
event_name = Column(String, nullable=False, index=True)
contract_address = Column(HexByteString, nullable=False, index=True)
event_arguments = Column(JSON, index=True)
transaction_hash = Column(HexByteString, nullable=False, index=True)
block_number = Column(Integer, nullable=False, index=True)
block_hash = Column(HexByteString, nullable=False, index=True)
log_index = Column(Integer, nullable=False, index=True)
transaction_index = Column(Integer, nullable=False, index=True)
| ContractEvents |
python | spack__spack | lib/spack/spack/test/packages.py | {
"start": 522,
"end": 1017
} | class ____(spack.repo._PrependFileLoader):
"""Skip explicit prepending of 'spack_repo.builtin.build_systems' import."""
def __init__(self, fullname, repo, package_name):
super().__init__(fullname, repo, package_name)
self.prepend = b""
def pkg_factory(name):
"""Return a package object tied to an abstract spec"""
pkg_cls = spack.repo.PATH.get_pkg_class(name)
return pkg_cls(Spec(name))
@pytest.mark.usefixtures("config", "mock_packages")
| MyPrependFileLoader |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_events_stats.py | {
"start": 111477,
"end": 114628
} | class ____(
APITestCase, ProfilesSnubaTestCase, SearchIssueTestMixin
):
endpoint = "sentry-api-0-organization-events-stats"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.one_day_ago = before_now(days=1).replace(hour=10, minute=0, second=0, microsecond=0)
self.two_days_ago = before_now(days=2).replace(hour=10, minute=0, second=0, microsecond=0)
self.three_days_ago = before_now(days=3).replace(hour=10, minute=0, second=0, microsecond=0)
self.project = self.create_project()
self.url = reverse(
"sentry-api-0-organization-events-stats",
kwargs={"organization_id_or_slug": self.project.organization.slug},
)
def test_functions_dataset_simple(self) -> None:
self.store_functions(
[
{
"self_times_ns": [100 for _ in range(100)],
"package": "pkg",
"function": "foo",
"in_app": True,
},
{
"self_times_ns": [100 for _ in range(10)],
"package": "pkg",
"function": "bar",
"in_app": True,
},
],
project=self.project,
timestamp=self.two_days_ago,
)
y_axes = [
"cpm()",
"p95(function.duration)",
"all_examples()",
]
data = {
"dataset": "profileFunctions",
"field": ["function", "count()"],
"start": self.three_days_ago.isoformat(),
"end": self.one_day_ago.isoformat(),
"yAxis": y_axes,
"interval": "1d",
"topEvents": "2",
"excludeOther": "1",
}
response = self.client.get(self.url, data=data, format="json")
assert response.status_code == 200, response.content
assert sum(
row[1][0]["count"] for row in response.data["foo"]["cpm()"]["data"]
) == pytest.approx(
100 / ((self.one_day_ago - self.three_days_ago).total_seconds() / 60), rel=1e-3
)
assert sum(
row[1][0]["count"] for row in response.data["bar"]["cpm()"]["data"]
) == pytest.approx(
10 / ((self.one_day_ago - self.three_days_ago).total_seconds() / 60), rel=1e-3
)
assert any(
row[1][0]["count"] > 0 for row in response.data["foo"]["p95(function.duration)"]["data"]
)
assert any(
row[1][0]["count"] > 0 for row in response.data["bar"]["p95(function.duration)"]["data"]
)
for func in ["foo", "bar"]:
for y_axis in y_axes:
assert response.data[func][y_axis]["meta"]["units"] == {
"time": None,
"count": None,
"cpm": None,
"function": None,
"p95_function_duration": "nanosecond",
"all_examples": None,
}
| OrganizationEventsStatsTopNEventsProfileFunctionDatasetEndpointTest |
python | getsentry__sentry | src/sentry/issues/endpoints/actionable_items.py | {
"start": 643,
"end": 2248
} | class ____(ProjectEndpoint):
"""
This endpoint is used to retrieve actionable items that a user can perform on an event. It is a private endpoint
that is only used by the Sentry UI. The Source Map Debugging endpoint will remain public as it will only ever
return information about the source map debugging process while this endpoint will grow. Actionable items are
errors or messages we show to users about problems with their event which we will show the user how to fix.
"""
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.ISSUES
def get(self, request: Request, project: Project, event_id: str) -> Response:
# Retrieve information about actionable items (source maps, event errors, etc.) for a given event.
event = eventstore.backend.get_event_by_id(project.id, event_id)
if event is None:
raise NotFound(detail="Event not found")
actions = []
event_errors = event.data.get("errors", [])
# Add event errors to actionable items
for event_error in event_errors:
if (
event_error["type"] in errors_to_hide
or event_error["type"] in deprecated_event_errors
):
continue
response = EventError(event_error).get_api_context()
actions.append(response)
priority_get = lambda x: priority_ranking.get(x["type"], ActionPriority.UNKNOWN)
sorted_errors = sorted(actions, key=priority_get)
return Response({"errors": sorted_errors})
| ActionableItemsEndpoint |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/enums.py | {
"start": 3968,
"end": 4071
} | class ____(_SunderMissingInDataType, enum.Enum):
"""this is enum class"""
| EnumSunderMissingInDataType |
python | kubernetes-client__python | kubernetes/client/models/v1_key_to_path.py | {
"start": 383,
"end": 6043
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'key': 'str',
'mode': 'int',
'path': 'str'
}
attribute_map = {
'key': 'key',
'mode': 'mode',
'path': 'path'
}
def __init__(self, key=None, mode=None, path=None, local_vars_configuration=None): # noqa: E501
"""V1KeyToPath - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._key = None
self._mode = None
self._path = None
self.discriminator = None
self.key = key
if mode is not None:
self.mode = mode
self.path = path
@property
def key(self):
"""Gets the key of this V1KeyToPath. # noqa: E501
key is the key to project. # noqa: E501
:return: The key of this V1KeyToPath. # noqa: E501
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""Sets the key of this V1KeyToPath.
key is the key to project. # noqa: E501
:param key: The key of this V1KeyToPath. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and key is None: # noqa: E501
raise ValueError("Invalid value for `key`, must not be `None`") # noqa: E501
self._key = key
@property
def mode(self):
"""Gets the mode of this V1KeyToPath. # noqa: E501
mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. # noqa: E501
:return: The mode of this V1KeyToPath. # noqa: E501
:rtype: int
"""
return self._mode
@mode.setter
def mode(self, mode):
"""Sets the mode of this V1KeyToPath.
mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. # noqa: E501
:param mode: The mode of this V1KeyToPath. # noqa: E501
:type: int
"""
self._mode = mode
@property
def path(self):
"""Gets the path of this V1KeyToPath. # noqa: E501
path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. # noqa: E501
:return: The path of this V1KeyToPath. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this V1KeyToPath.
path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. # noqa: E501
:param path: The path of this V1KeyToPath. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and path is None: # noqa: E501
raise ValueError("Invalid value for `path`, must not be `None`") # noqa: E501
self._path = path
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1KeyToPath):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1KeyToPath):
return True
return self.to_dict() != other.to_dict()
| V1KeyToPath |
python | realpython__materials | arcade-platformer/arcade_platformer/01_game_skeleton.py | {
"start": 245,
"end": 1217
} | class ____(arcade.Window):
def __init__(self):
pass
def setup(self):
"""Sets up the game for the current level"""
pass
def on_key_press(self, key: int, modifiers: int):
"""Processes key presses
Arguments:
key {int} -- Which key was pressed
modifiers {int} -- Which modifiers were down at the time
"""
def on_key_release(self, key: int, modifiers: int):
"""Processes key releases
Arguments:
key {int} -- Which key was released
modifiers {int} -- Which modifiers were down at the time
"""
def on_update(self, delta_time: float):
"""Updates the position of all game objects
Arguments:
delta_time {float} -- How much time since the last call
"""
pass
def on_draw(self):
pass
if __name__ == "__main__":
window = Platformer()
window.setup()
arcade.run()
| Platformer |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_length/invalid_length_returned.py | {
"start": 306,
"end": 424
} | class ____:
"""__len__ returns <type 'long'>"""
def __len__(self):
return sys.maxsize + 1
| SecondGoodLen |
python | tensorflow__tensorflow | tensorflow/python/framework/c_api_util.py | {
"start": 3807,
"end": 4310
} | class ____(object):
"""Wrapper around TF_ImportGraphDefOptions that handles deletion."""
__slots__ = ["results"]
def __init__(self, results):
self.results = results
def __del__(self):
# Note: when we're destructing the global context (i.e when the process is
# terminating) we can have already deleted other modules.
if c_api is not None and c_api.TF_DeleteImportGraphDefResults is not None:
c_api.TF_DeleteImportGraphDefResults(self.results)
| ScopedTFImportGraphDefResults |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_macosx.py | {
"start": 334,
"end": 725
} | class ____(_macosx.Timer, TimerBase):
"""Subclass of `.TimerBase` using CFRunLoop timer events."""
# completely implemented at the C-level (in _macosx.Timer)
def _allow_interrupt_macos():
"""A context manager that allows terminating a plot by sending a SIGINT."""
return _allow_interrupt(
lambda rsock: _macosx.wake_on_fd_write(rsock.fileno()), _macosx.stop)
| TimerMac |
python | astropy__astropy | astropy/visualization/tests/test_interval.py | {
"start": 413,
"end": 3425
} | class ____:
data = np.linspace(-20.0, 60.0, 100)
def test_manual(self):
interval = ManualInterval(-10.0, +15.0)
vmin, vmax = interval.get_limits(self.data)
assert_allclose(vmin, -10.0)
assert_allclose(vmax, +15.0)
def test_manual_defaults(self):
interval = ManualInterval(vmin=-10.0)
vmin, vmax = interval.get_limits(self.data)
assert_allclose(vmin, -10.0)
assert_allclose(vmax, np.max(self.data))
interval = ManualInterval(vmax=15.0)
vmin, vmax = interval.get_limits(self.data)
assert_allclose(vmin, np.min(self.data))
assert_allclose(vmax, 15.0)
def test_manual_zero_limit(self):
# Regression test for a bug that caused ManualInterval to compute the
# limit (min or max) if it was set to zero.
interval = ManualInterval(vmin=0, vmax=0)
vmin, vmax = interval.get_limits(self.data)
assert_allclose(vmin, 0)
assert_allclose(vmax, 0)
def test_manual_defaults_with_nan(self):
interval = ManualInterval()
data = np.copy(self.data)
data[0] = np.nan
vmin, vmax = interval.get_limits(self.data)
assert_allclose(vmin, -20)
assert_allclose(vmax, +60)
def test_minmax(self):
interval = MinMaxInterval()
vmin, vmax = interval.get_limits(self.data)
assert_allclose(vmin, -20.0)
assert_allclose(vmax, +60.0)
def test_percentile(self):
interval = PercentileInterval(62.2)
vmin, vmax = interval.get_limits(self.data)
assert_allclose(vmin, -4.88)
assert_allclose(vmax, 44.88)
def test_asymmetric_percentile(self):
interval = AsymmetricPercentileInterval(10.5, 70.5)
vmin, vmax = interval.get_limits(self.data)
assert_allclose(vmin, -11.6)
assert_allclose(vmax, 36.4)
def test_asymmetric_percentile_nsamples(self):
with NumpyRNGContext(12345):
interval = AsymmetricPercentileInterval(10.5, 70.5, n_samples=20)
vmin, vmax = interval.get_limits(self.data)
assert_allclose(vmin, -14.367676767676768)
assert_allclose(vmax, 40.266666666666666)
def test_symmetric_interval_manual(self):
interval = SymmetricInterval(radius=40)
vmin, vmax = interval.get_limits(self.data)
assert_allclose(vmin, -40.0)
assert_allclose(vmax, +40.0)
interval = SymmetricInterval(radius=100, midpoint=10)
vmin, vmax = interval.get_limits(self.data)
assert_allclose(vmin, -90.0)
assert_allclose(vmax, +110.0)
def test_symmetric_interval_auto(self):
interval = SymmetricInterval()
vmin, vmax = interval.get_limits(self.data)
assert_allclose(vmin, -60.0)
assert_allclose(vmax, +60.0)
interval = SymmetricInterval(midpoint=50)
vmin, vmax = interval.get_limits(self.data)
assert_allclose(vmin, -20.0)
assert_allclose(vmax, +120.0)
| TestInterval |
python | bokeh__bokeh | tests/unit/bokeh/core/test_enums.py | {
"start": 4547,
"end": 18280
} | class ____:
def test_Anchor(self) -> None:
assert tuple(bce.Anchor) == (
"top_left", "top_center", "top_right",
"center_left", "center_center", "center_right",
"bottom_left", "bottom_center", "bottom_right",
"top", "left", "center", "right", "bottom",
)
def test_AngleUnits(self) -> None:
assert tuple(bce.AngleUnits) == ("deg", "rad", "grad", "turn")
def test_Auto(self) -> None:
assert tuple(bce.Auto) == ("auto",)
def test_ButtonType(self) -> None:
assert tuple(bce.ButtonType) == ("default", "primary", "success", "warning", "danger", "light")
def test_CalendarPosition(self) -> None:
assert tuple(bce.CalendarPosition) == ("auto", "above", "below")
def test_ContextWhich(self) -> None:
assert tuple(bce.ContextWhich) == ("start", "center", "end", "all")
def test_DashPattern(self) -> None:
assert tuple(bce.DashPattern) ==("solid", "dashed", "dotted", "dotdash", "dashdot")
def test_DateFormat(self) -> None:
assert tuple(bce.DateFormat) == ("ATOM", "W3C", "RFC-3339", "ISO-8601", "COOKIE", "RFC-822",
"RFC-850", "RFC-1036", "RFC-1123", "RFC-2822", "RSS", "TIMESTAMP")
def test_DatetimeUnits(self) -> None:
assert tuple(bce.DatetimeUnits) == ("microseconds", "milliseconds", "seconds", "minsec",
"minutes", "hourmin", "hours", "days", "months", "years")
def test_Dimension(self) -> None:
assert tuple(bce.Dimension) == ("width", "height")
def test_Dimensions(self) -> None:
assert tuple(bce.Dimensions) == ("width", "height", "both")
def test_Direction(self) -> None:
assert tuple(bce.Direction) == ("clock", "anticlock")
def test_FontStyle(self) -> None:
assert tuple(bce.FontStyle) == ('normal', 'italic', 'bold', 'bold italic')
def test_BuiltinFormatter(self) -> None:
assert tuple(bce.BuiltinFormatter) == ("raw", "basic", "numeral", "printf", "datetime")
def test_HatchPattern(self) -> None:
assert tuple(bce.HatchPattern) == (
"blank", "dot", "ring", "horizontal_line", "vertical_line", "cross", "horizontal_dash", "vertical_dash",
"spiral", "right_diagonal_line", "left_diagonal_line", "diagonal_cross", "right_diagonal_dash",
"left_diagonal_dash", "horizontal_wave", "vertical_wave", "criss_cross",
)
def test_HatchPatternAbbreviation(self) -> None:
assert tuple(bce.HatchPatternAbbreviation) == (' ', '.', 'o', '-', '|', '+', '"', ':', '@', '/', '\\', 'x', ',', '`', 'v', '>', '*')
def test_HexTileOrientation(self) -> None:
assert tuple(bce.HexTileOrientation) == ("pointytop", "flattop")
def test_HoldPolicy(self) -> None:
assert tuple(bce.HoldPolicy) == ("combine", "collect")
def test_HorizontalLocation(self) -> None:
assert tuple(bce.HorizontalLocation) == ("left", "right")
def test_ImageOrigin(self) -> None:
assert tuple(bce.ImageOrigin) == ("bottom_left", "top_left", "bottom_right", "top_right")
def test_ImplicitTarget(self) -> None:
assert tuple(bce.ImplicitTarget) == ("viewport", "canvas", "plot", "frame", "parent")
def test_JitterRandomDistribution(self) -> None:
assert tuple(bce.JitterRandomDistribution) == ("uniform", "normal")
def test_KeyModifier(self) -> None:
assert tuple(bce.KeyModifier) == ("shift", "ctrl", "alt")
def test_LabelOrientation(self) -> None:
assert tuple(bce.LabelOrientation) == ("horizontal", "vertical", "parallel", "normal")
def test_LatLon(self) -> None:
assert tuple(bce.LatLon) == ("lat", "lon")
def test_AlternationPolicy(self) -> None:
assert tuple(bce.AlternationPolicy) == ("none", "even", "odd", "every")
def test_LegendClickPolicy(self) -> None:
assert tuple(bce.LegendClickPolicy) == ("none", "hide", "mute")
def test_LegendLocation(self) -> None:
assert tuple(bce.LegendLocation) == (
"top_left", "top_center", "top_right",
"center_left", "center_center", "center_right",
"bottom_left", "bottom_center", "bottom_right",
"top", "left", "center", "right", "bottom",
)
def test_LineCap(self) -> None:
assert tuple(bce.LineCap) == ("butt", "round", "square")
def test_LineDash(self) -> None:
assert tuple(bce.LineDash) == ("solid", "dashed", "dotted", "dotdash", "dashdot")
def test_LineJoin(self) -> None:
assert tuple(bce.LineJoin) == ("miter", "round", "bevel")
def test_Location(self) -> None:
assert tuple(bce.Location) == ("above", "below", "left", "right")
def test_MapType(self) -> None:
assert tuple(bce.MapType) == ("satellite", "roadmap", "terrain", "hybrid")
def test_MarkerType(self) -> None:
assert tuple(bce.MarkerType) == ("asterisk", "circle", "circle_cross", "circle_dot", "circle_x", "circle_y", "cross",
"dash", "diamond", "diamond_cross", "diamond_dot", "dot", "hex", "hex_dot", "inverted_triangle",
"plus", "square", "square_cross", "square_dot", "square_pin", "square_x", "star", "star_dot",
"triangle", "triangle_dot", "triangle_pin", "x", "y")
def test_Movable(self) -> None:
assert tuple(bce.Movable) == ("none", "x", "y", "both")
def test_NamedColor(self) -> None:
assert len(tuple(bce.NamedColor)) == 148
assert tuple(bce.NamedColor) == tuple(named.__all__)
def test_NumeralLanguage(self) -> None:
assert tuple(bce.NumeralLanguage) == ("be-nl", "chs", "cs", "da-dk", "de-ch", "de", "en",
"en-gb", "es-ES", "es", "et", "fi", "fr-CA", "fr-ch",
"fr", "hu", "it", "ja", "nl-nl", "pl", "pt-br",
"pt-pt", "ru", "ru-UA", "sk", "th", "tr", "uk-UA")
def test_Orientation(self) -> None:
assert tuple(bce.Orientation) == ("horizontal", "vertical")
def test_OutlineShapeName(self) -> None:
assert tuple(bce.OutlineShapeName) == ("none", "box", "rectangle", "square", "circle", "ellipse", "trapezoid", "parallelogram", "diamond", "triangle")
def test_OutputBackend(self) -> None:
assert tuple(bce.OutputBackend) == ("canvas", "svg", "webgl")
def test_PaddingUnits(self) -> None:
assert tuple(bce.PaddingUnits) == ("percent", "absolute")
def test_Palette(self) -> None:
assert tuple(bce.Palette) == tuple(__palettes__)
def test_PanDirection(self) -> None:
assert tuple(bce.PanDirection) == ("left", "right", "up", "down", "west", "east", "north", "south")
def test_RenderLevel(self) -> None:
assert tuple(bce.RenderLevel) == ("image", "underlay", "glyph", "guide", "annotation", "overlay")
def test_ResetPolicy(self) -> None:
assert tuple(bce.ResetPolicy) == ("standard", "event_only")
def test_Resizable(self) -> None:
assert tuple(bce.Resizable) == ("none", "left", "right", "top", "bottom", "x", "y", "all")
def test_ResolutionType(self) -> None:
assert tuple(bce.ResolutionType) == ("microseconds", "milliseconds", "seconds", "minsec", "minutes", "hourmin", "hours", "days", "months", "years")
def test_RoundingFunction(self) -> None:
assert tuple(bce.RoundingFunction) == ("round", "nearest", "floor", "rounddown", "ceil", "roundup")
def test_RadiusDimension(self) -> None:
assert tuple(bce.RadiusDimension) == ("x", "y", "max", "min")
def test_RegionSelectionMode(self) -> None:
assert tuple(bce.RegionSelectionMode) == ("replace", "append", "intersect", "subtract", "xor")
def test_SelectionMode(self) -> None:
assert tuple(bce.SelectionMode) == ("replace", "append", "intersect", "subtract", "xor", "toggle")
def test_SizingMode(self) -> None:
assert tuple(bce.SizingMode) == ("stretch_width", "stretch_height", "stretch_both", "scale_width", "scale_height", "scale_both", "fixed", "inherit")
def test_SortDirection(self) -> None:
assert tuple(bce.SortDirection) == ("ascending", "descending")
def test_SpatialUnits(self) -> None:
assert tuple(bce.SpatialUnits) == ("screen", "data")
def test_StartEnd(self) -> None:
assert tuple(bce.StartEnd) == ("start", "end")
def test_StepMode(self) -> None:
assert tuple(bce.StepMode) == ("before", "after", "center")
def test_TeXDisplay(self) -> None:
assert tuple(bce.TeXDisplay) == ("inline", "block", "auto")
def test_TextAlign(self) -> None:
assert tuple(bce.TextAlign) == ("left", "right", "center")
def test_TextBaseline(self) -> None:
assert tuple(bce.TextBaseline) == ("top", "middle", "bottom", "alphabetic", "hanging", "ideographic")
def test_TextureRepetition(self) -> None:
assert tuple(bce.TextureRepetition) == ("repeat", "repeat_x", "repeat_y", "no_repeat")
def test_TimedeltaResolutionType(self) -> None:
assert tuple(bce.TimedeltaResolutionType) == (
"nanoseconds", "microseconds", "milliseconds", "seconds", "minsec", "minutes", "hourmin", "hours", "days")
def test_ToolIcon(self) -> None:
assert tuple(bce.ToolIcon) == (
"append_mode",
"arrow_down_to_bar",
"arrow_up_from_bar",
"auto_box_zoom",
"bold",
"box_edit",
"box_select",
"box_zoom",
"caret_down",
"caret_left",
"caret_right",
"caret_up",
"check",
"chevron_down",
"chevron_left",
"chevron_right",
"chevron_up",
"clear_selection",
"copy",
"crosshair",
"dark_theme",
"delete",
"freehand_draw",
"fullscreen",
"help",
"hover",
"intersect_mode",
"invert_selection",
"italic",
"lasso_select",
"light_theme",
"line_edit",
"maximize",
"minimize",
"pan",
"pin",
"point_draw",
"pointer",
"poly_draw",
"poly_edit",
"polygon_select",
"range",
"redo",
"replace_mode",
"reset",
"save",
"see_off",
"see_on",
"settings",
"square",
"square_check",
"subtract_mode",
"tap_select",
"text_align_center",
"text_align_left",
"text_align_right",
"undo",
"unknown",
"unpin",
"wheel_pan",
"wheel_zoom",
"x_box_select",
"x_box_zoom",
"x_grip",
"x_pan",
"xor_mode",
"y_box_select",
"y_box_zoom",
"y_grip",
"y_pan",
"zoom_in",
"zoom_out",
)
def test_ToolName(self) -> None:
assert tuple(bce.ToolName) == (
"auto_box_zoom",
"box_select",
"box_zoom",
"click",
"copy",
"crosshair",
"doubletap",
"examine",
"freehand_draw",
"fullscreen",
"help",
"hover",
"lasso_select",
"pan",
"pan_down",
"pan_east",
"pan_left",
"pan_north",
"pan_right",
"pan_south",
"pan_up",
"pan_west",
"poly_select",
"redo",
"reset",
"save",
"tap",
"undo",
"wheel_zoom",
"xbox_select",
"xbox_zoom",
"xcrosshair",
"xpan",
"xwheel_pan",
"xwheel_zoom",
"xzoom_in",
"xzoom_out",
"ybox_select",
"ybox_zoom",
"ycrosshair",
"ypan",
"ywheel_pan",
"ywheel_zoom",
"yzoom_in",
"yzoom_out",
"zoom_in",
"zoom_out",
)
def test_TooltipAttachment(self) -> None:
assert tuple(bce.TooltipAttachment) == ("horizontal", "vertical", "left", "right", "above", "below")
def test_TooltipFieldFormatter(self) -> None:
assert tuple(bce.TooltipFieldFormatter) == ("numeral", "datetime", "printf")
def test_VerticalAlign(self) -> None:
assert tuple(bce.VerticalAlign) == ("top", "middle", "bottom")
def test_VerticalLocation(self) -> None:
assert tuple(bce.VerticalLocation) == ("above", "below")
def test_WindowAxis(self) -> None:
assert tuple(bce.WindowAxis) == ("none", "x", "y")
# any changes to contents of bce.py easily trackable here
def test_enums_contents() -> None:
assert [name for name in dir(bce) if isinstance(getattr(bce, name), bce.Enumeration)] == list(ALL)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Test___all__ = verify_all(bce, ALL)
| Test_bce |
python | walkccc__LeetCode | solutions/3009. Maximum Number of Intersections on the Chart/3009.py | {
"start": 0,
"end": 482
} | class ____:
def maxIntersectionCount(self, y: list[int]) -> int:
ans = 0
intersectionCount = 0
line = collections.Counter()
for i, (a, b) in enumerate(itertools.pairwise(y)):
start = 2 * a
end = 2 * b + (0 if i == len(y) - 2 else -1 if b > a else 1)
line[min(start, end)] += 1
line[max(start, end) + 1] -= 1
for count in sorted(line):
intersectionCount += line[count]
ans = max(ans, intersectionCount)
return ans
| Solution |
python | django__django | django/db/backends/sqlite3/base.py | {
"start": 13413,
"end": 15095
} | class ____(Database.Cursor):
"""
Django uses the "format" and "pyformat" styles, but Python's sqlite3 module
supports neither of these styles.
This wrapper performs the following conversions:
- "format" style to "qmark" style
- "pyformat" style to "named" style
In both cases, if you want to use a literal "%s", you'll need to use "%%s".
"""
def execute(self, query, params=None):
if params is None:
return super().execute(query)
# Extract names if params is a mapping, i.e. "pyformat" style is used.
param_names = list(params) if isinstance(params, Mapping) else None
query = self.convert_query(query, param_names=param_names)
return super().execute(query, params)
def executemany(self, query, param_list):
# Extract names if params is a mapping, i.e. "pyformat" style is used.
# Peek carefully as a generator can be passed instead of a list/tuple.
peekable, param_list = tee(iter(param_list))
if (params := next(peekable, None)) and isinstance(params, Mapping):
param_names = list(params)
else:
param_names = None
query = self.convert_query(query, param_names=param_names)
return super().executemany(query, param_list)
def convert_query(self, query, *, param_names=None):
if param_names is None:
# Convert from "format" style to "qmark" style.
return FORMAT_QMARK_REGEX.sub("?", query).replace("%%", "%")
else:
# Convert from "pyformat" style to "named" style.
return query % {name: f":{name}" for name in param_names}
| SQLiteCursorWrapper |
python | dask__dask | dask/dataframe/dask_expr/diagnostics/_analyze_plugin.py | {
"start": 2350,
"end": 2806
} | class ____:
_expr_statistics: defaultdict[str, ExpressionStatistics]
def __init__(self) -> None:
self._expr_statistics = defaultdict(ExpressionStatistics)
def add(self, expr: str, metric: str, value: float):
self._expr_statistics[expr].add(metric, value)
def merge(self, other: Statistics):
for expr, statistics in other._expr_statistics.items():
self._expr_statistics[expr].merge(statistics)
| Statistics |
python | pytorch__pytorch | torch/ao/nn/qat/modules/embedding_ops.py | {
"start": 3833,
"end": 7867
} | class ____(nn.EmbeddingBag):
r"""
An embedding bag module attached with FakeQuantize modules for weight,
used for quantization aware training.
We adopt the same interface as `torch.nn.EmbeddingBag`, please see
https://pytorch.org/docs/stable/generated/torch.nn.EmbeddingBag.html#torch.nn.EmbeddingBag
for documentation.
Similar to `torch.nn.EmbeddingBag`, with FakeQuantize modules initialized to
default.
Attributes:
weight: fake quant module for weight
"""
_FLOAT_MODULE = nn.EmbeddingBag
def __init__(
self,
num_embeddings,
embedding_dim,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
mode="mean",
sparse=False,
_weight=None,
include_last_offset=False,
padding_idx=None,
qconfig=None,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__(
num_embeddings,
embedding_dim,
max_norm,
norm_type,
scale_grad_by_freq,
mode,
sparse,
_weight,
include_last_offset,
padding_idx,
**factory_kwargs,
)
assert qconfig, "qconfig must be provided for QAT module"
assert qconfig.weight().qscheme == torch.per_channel_affine_float_qparams, (
"Embedding Bag weights requires a qscheme of torch.per_channel_affine_float_qparams Got "
+ str(qconfig.weight().qscheme)
)
self.qconfig = qconfig
self.weight_fake_quant = qconfig.weight(factory_kwargs=factory_kwargs)
def forward(self, input, offsets=None, per_sample_weights=None) -> Tensor:
return F.embedding_bag(
input,
self.weight_fake_quant(self.weight),
offsets,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.mode,
self.sparse,
per_sample_weights,
self.include_last_offset,
self.padding_idx,
)
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False):
r"""Create a qat module from a float module
Args: `mod` a float module, either produced by torch.ao.quantization utilities
or directly from user
"""
assert type(mod) is cls._FLOAT_MODULE, (
" qat."
+ cls.__name__
+ ".from_float only works for "
+ cls._FLOAT_MODULE.__name__
)
assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
assert mod.qconfig, "Input float module must have a valid qconfig"
weight_qscheme = mod.qconfig.weight().qscheme # type: ignore[union-attr, operator]
assert weight_qscheme == torch.per_channel_affine_float_qparams, (
"Embedding Bag weights requires a qscheme of torch.per_channel_affine_float_qparams Got "
+ str(weight_qscheme)
)
qconfig = mod.qconfig
qat_embedding_bag = cls(
mod.num_embeddings,
mod.embedding_dim,
mod.max_norm,
mod.norm_type,
mod.scale_grad_by_freq,
mod.mode,
mod.sparse,
mod.weight,
mod.include_last_offset,
mod.padding_idx,
qconfig=qconfig,
)
return qat_embedding_bag
def to_float(self):
embedding_bag = torch.nn.EmbeddingBag(
self.num_embeddings,
self.embedding_dim,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.mode,
self.sparse,
None,
self.include_last_offset,
self.padding_idx,
)
embedding_bag.weight = torch.nn.Parameter(self.weight.detach())
embedding_bag.train(self.training)
return embedding_bag
| EmbeddingBag |
python | redis__redis-py | redis/commands/core.py | {
"start": 56490,
"end": 56756
} | class ____(Enum):
# set the value for each provided key to each
# provided value only if all do not already exist.
NX = "NX"
# set the value for each provided key to each
# provided value only if all already exist.
XX = "XX"
| DataPersistOptions |
python | run-llama__llama_index | llama-index-finetuning/llama_index/finetuning/cross_encoders/cross_encoder.py | {
"start": 339,
"end": 4798
} | class ____(BaseCrossEncoderFinetuningEngine):
"""Cross-Encoders Finetune Engine."""
def __init__(
self,
dataset: List[CrossEncoderFinetuningDatasetSample],
model_id: str = "cross-encoder/ms-marco-MiniLM-L-12-v2",
model_output_path: str = "exp_finetune",
batch_size: int = 10,
val_dataset: Union[List[CrossEncoderFinetuningDatasetSample], None] = None,
loss: Union[Any, None] = None,
epochs: int = 2,
show_progress_bar: bool = True,
evaluation_steps: int = 50,
) -> None:
"""Init params."""
try:
from sentence_transformers import InputExample
from sentence_transformers.cross_encoder import CrossEncoder
from torch.utils.data import DataLoader
except ImportError:
raise ImportError(
"Cannot import sentence-transformers package,",
"please `pip install sentence-transformers`",
)
self.dataset = dataset
self.model_id = model_id
self.model_output_path = model_output_path
self.model = CrossEncoder(self.model_id, num_labels=1)
examples: Any = []
for sample in dataset:
query = sample.query
text = sample.context
score = sample.score
example = InputExample(texts=[query, text], label=score)
examples.append(example)
self.examples = examples
self.loader: DataLoader = DataLoader(examples, batch_size=batch_size)
# define evaluator
from sentence_transformers.cross_encoder.evaluation import (
CEBinaryClassificationEvaluator,
)
# TODO: also add support for CERerankingEvaluator
evaluator: Optional[CEBinaryClassificationEvaluator] = None
if val_dataset is not None:
dev_samples = []
for val_sample in val_dataset:
val_query = val_sample.query
val_text = val_sample.context
val_score = val_sample.score
val_example = InputExample(texts=[val_query, val_text], label=val_score)
dev_samples.append(val_example)
evaluator = CEBinaryClassificationEvaluator.from_input_examples(dev_samples)
self.evaluator = evaluator
# define loss
self.loss = loss
self.epochs = epochs
self.show_progress_bar = show_progress_bar
self.evaluation_steps = evaluation_steps
self.warmup_steps = int(len(self.loader) * epochs * 0.1)
def finetune(self, **train_kwargs: Any) -> None:
"""Finetune model."""
self.model.fit(
train_dataloader=self.loader,
epochs=self.epochs,
warmup_steps=self.warmup_steps,
output_path=self.model_output_path,
show_progress_bar=self.show_progress_bar,
evaluator=self.evaluator,
evaluation_steps=self.evaluation_steps,
)
# CrossEncoder library's fit function does not save model when evaluator is None
# https://github.com/UKPLab/sentence-transformers/issues/2324
if self.evaluator is None:
self.model.save(self.model_output_path)
else:
pass
def push_to_hub(self, repo_id: Any = None) -> None:
"""
Saves the model and tokenizer to HuggingFace hub.
"""
if repo_id is not None:
try:
self.model.model.push_to_hub(repo_id=repo_id)
self.model.tokenizer.push_to_hub(repo_id=repo_id)
except ValueError:
raise ValueError(
"HuggingFace CLI/Hub login not "
"completed provide token to login using"
"huggingface_hub.login() see this "
"https://huggingface.co/docs/transformers/model_sharing#share-a-model"
)
else:
raise ValueError("No value provided for repo_id")
def get_finetuned_model(
self, model_name: str, top_n: int = 3
) -> SentenceTransformerRerank:
"""
Loads the model from huggingface hub as re-ranker.
:param repo_id: Huggingface Hub repo from where you want to load the model
:param top_n: The value of nodes the re-ranker should filter
"""
return SentenceTransformerRerank(model=model_name, top_n=top_n)
| CrossEncoderFinetuneEngine |
python | pypa__warehouse | warehouse/metrics/services.py | {
"start": 1586,
"end": 4197
} | class ____:
def __init__(self, datadog):
self._datadog = datadog
@classmethod
def create_service(cls, context, request):
return cls(
DogStatsd(
host=request.registry.settings.get("metrics.host", "127.0.0.1"),
port=int(request.registry.settings.get("metrics.port", 8125)),
namespace=request.registry.settings.get("metrics.namespace"),
use_ms=True,
)
)
def gauge(self, metric, value, tags=None, sample_rate=1):
self._datadog.gauge(metric, value, tags=tags, sample_rate=sample_rate)
def increment(self, metric, value=1, tags=None, sample_rate=1):
self._datadog.increment(metric, value, tags=tags, sample_rate=sample_rate)
def decrement(self, metric, value=1, tags=None, sample_rate=1):
self._datadog.decrement(metric, value, tags=tags, sample_rate=sample_rate)
def histogram(self, metric, value, tags=None, sample_rate=1):
self._datadog.histogram(metric, value, tags=tags, sample_rate=sample_rate)
def distribution(self, metric, value, tags=None, sample_rate=1):
self._datadog.distribution(metric, value, tags=tags, sample_rate=sample_rate)
def timing(self, metric, value, tags=None, sample_rate=1):
self._datadog.timing(metric, value, tags=tags, sample_rate=sample_rate)
def timed(self, metric=None, tags=None, sample_rate=1, use_ms=None):
return self._datadog.timed(
metric, tags=tags, sample_rate=sample_rate, use_ms=use_ms
)
def set(self, metric, value, tags=None, sample_rate=1):
self._datadog.set(metric, value, tags=tags, sample_rate=sample_rate)
def event(
self,
title,
text,
alert_type=None,
aggregation_key=None,
source_type_name=None,
date_happened=None,
priority=None,
tags=None,
hostname=None,
):
self._datadog.event(
title,
text,
alert_type=alert_type,
aggregation_key=aggregation_key,
source_type_name=source_type_name,
date_happened=date_happened,
priority=priority,
tags=tags,
hostname=hostname,
)
def service_check(
self, check_name, status, tags=None, timestamp=None, hostname=None, message=None
):
self._datadog.service_check(
check_name,
status,
tags=tags,
timestamp=timestamp,
hostname=hostname,
message=message,
)
| DataDogMetrics |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/comments.py | {
"start": 24160,
"end": 33548
} | class ____(ordereddict, CommentedBase):
__slots__ = (Comment.attrib, '_ok', '_ref')
def __init__(self, *args, **kw):
# type: (Any, Any) -> None
self._ok = set() # type: MutableSet[Any] # own keys
self._ref = [] # type: List[CommentedMap]
ordereddict.__init__(self, *args, **kw)
def _yaml_add_comment(self, comment, key=NoComment, value=NoComment):
# type: (Any, Optional[Any], Optional[Any]) -> None
"""values is set to key to indicate a value attachment of comment"""
if key is not NoComment:
self.yaml_key_comment_extend(key, comment)
return
if value is not NoComment:
self.yaml_value_comment_extend(value, comment)
else:
self.ca.comment = comment
def _yaml_add_eol_comment(self, comment, key):
# type: (Any, Any) -> None
"""add on the value line, with value specified by the key"""
self._yaml_add_comment(comment, value=key)
def _yaml_get_columnX(self, key):
# type: (Any) -> Any
return self.ca.items[key][2].start_mark.column
def _yaml_get_column(self, key):
# type: (Any) -> Any
column = None
sel_idx = None
pre, post, last = None, None, None
for x in self:
if pre is not None and x != key:
post = x
break
if x == key:
pre = last
last = x
if pre in self.ca.items:
sel_idx = pre
elif post in self.ca.items:
sel_idx = post
else:
# self.ca.items is not ordered
for k1 in self:
if k1 >= key:
break
if k1 not in self.ca.items:
continue
sel_idx = k1
if sel_idx is not None:
column = self._yaml_get_columnX(sel_idx)
return column
def _yaml_get_pre_comment(self):
# type: () -> Any
pre_comments = [] # type: List[Any]
if self.ca.comment is None:
self.ca.comment = [None, pre_comments]
else:
pre_comments = self.ca.comment[1]
return pre_comments
def _yaml_clear_pre_comment(self):
# type: () -> Any
pre_comments = [] # type: List[Any]
if self.ca.comment is None:
self.ca.comment = [None, pre_comments]
else:
self.ca.comment[1] = pre_comments
return pre_comments
def update(self, *vals, **kw):
# type: (Any, Any) -> None
try:
ordereddict.update(self, *vals, **kw)
except TypeError:
# probably a dict that is used
for x in vals[0]:
self[x] = vals[0][x]
if vals:
try:
self._ok.update(vals[0].keys()) # type: ignore
except AttributeError:
# assume one argument that is a list/tuple of two element lists/tuples
for x in vals[0]:
self._ok.add(x[0])
if kw:
self._ok.add(*kw.keys())
def insert(self, pos, key, value, comment=None):
# type: (Any, Any, Any, Optional[Any]) -> None
"""insert key value into given position
attach comment if provided
"""
keys = list(self.keys()) + [key]
ordereddict.insert(self, pos, key, value)
for keytmp in keys:
self._ok.add(keytmp)
for referer in self._ref:
for keytmp in keys:
referer.update_key_value(keytmp)
if comment is not None:
self.yaml_add_eol_comment(comment, key=key)
def mlget(self, key, default=None, list_ok=False):
# type: (Any, Any, Any) -> Any
"""multi-level get that expects dicts within dicts"""
if not isinstance(key, list):
return self.get(key, default)
# assume that the key is a list of recursively accessible dicts
def get_one_level(key_list, level, d):
# type: (Any, Any, Any) -> Any
if not list_ok:
assert isinstance(d, dict)
if level >= len(key_list):
if level > len(key_list):
raise IndexError
return d[key_list[level - 1]]
return get_one_level(key_list, level + 1, d[key_list[level - 1]])
try:
return get_one_level(key, 1, self)
except KeyError:
return default
except (TypeError, IndexError):
if not list_ok:
raise
return default
def __getitem__(self, key):
# type: (Any) -> Any
try:
return ordereddict.__getitem__(self, key)
except KeyError:
for merged in getattr(self, merge_attrib, []):
if key in merged[1]:
return merged[1][key]
raise
def __setitem__(self, key, value):
# type: (Any, Any) -> None
# try to preserve the scalarstring type if setting an existing key to a new value
if key in self:
if (
isinstance(value, str)
and not isinstance(value, ScalarString)
and isinstance(self[key], ScalarString)
):
value = type(self[key])(value)
ordereddict.__setitem__(self, key, value)
self._ok.add(key)
def _unmerged_contains(self, key):
# type: (Any) -> Any
if key in self._ok:
return True
return None
def __contains__(self, key):
# type: (Any) -> bool
return bool(ordereddict.__contains__(self, key))
def get(self, key, default=None):
# type: (Any, Any) -> Any
try:
return self.__getitem__(key)
except: # NOQA
return default
def __repr__(self):
# type: () -> Any
return ordereddict.__repr__(self).replace('CommentedMap', 'ordereddict')
def non_merged_items(self):
# type: () -> Any
for x in ordereddict.__iter__(self):
if x in self._ok:
yield x, ordereddict.__getitem__(self, x)
def __delitem__(self, key):
# type: (Any) -> None
# for merged in getattr(self, merge_attrib, []):
# if key in merged[1]:
# value = merged[1][key]
# break
# else:
# # not found in merged in stuff
# ordereddict.__delitem__(self, key)
# for referer in self._ref:
# referer.update=_key_value(key)
# return
#
# ordereddict.__setitem__(self, key, value) # merge might have different value
# self._ok.discard(key)
self._ok.discard(key)
ordereddict.__delitem__(self, key)
for referer in self._ref:
referer.update_key_value(key)
def __iter__(self):
# type: () -> Any
for x in ordereddict.__iter__(self):
yield x
def _keys(self):
# type: () -> Any
for x in ordereddict.__iter__(self):
yield x
def __len__(self):
# type: () -> int
return int(ordereddict.__len__(self))
def __eq__(self, other):
# type: (Any) -> bool
return bool(dict(self) == other)
def keys(self):
# type: () -> Any
return CommentedMapKeysView(self)
def values(self):
# type: () -> Any
return CommentedMapValuesView(self)
def _items(self):
# type: () -> Any
for x in ordereddict.__iter__(self):
yield x, ordereddict.__getitem__(self, x)
def items(self):
# type: () -> Any
return CommentedMapItemsView(self)
@property
def merge(self):
# type: () -> Any
if not hasattr(self, merge_attrib):
setattr(self, merge_attrib, [])
return getattr(self, merge_attrib)
def copy(self):
# type: () -> Any
x = type(self)() # update doesn't work
for k, v in self._items():
x[k] = v
self.copy_attributes(x)
return x
def add_referent(self, cm):
# type: (Any) -> None
if cm not in self._ref:
self._ref.append(cm)
def add_yaml_merge(self, value):
# type: (Any) -> None
for v in value:
v[1].add_referent(self)
for k, v in v[1].items():
if ordereddict.__contains__(self, k):
continue
ordereddict.__setitem__(self, k, v)
self.merge.extend(value)
def update_key_value(self, key):
# type: (Any) -> None
if key in self._ok:
return
for v in self.merge:
if key in v[1]:
ordereddict.__setitem__(self, key, v[1][key])
return
ordereddict.__delitem__(self, key)
def __deepcopy__(self, memo):
# type: (Any) -> Any
res = self.__class__()
memo[id(self)] = res
for k in self:
res[k] = copy.deepcopy(self[k], memo)
self.copy_attributes(res, memo=memo)
return res
# based on brownie mappings
@classmethod # type: ignore
def raise_immutable(cls, *args, **kwargs):
# type: (Any, *Any, **Any) -> None
raise TypeError('{} objects are immutable'.format(cls.__name__))
| CommentedMap |
python | pytorch__pytorch | .github/scripts/gitutils.py | {
"start": 12743,
"end": 15896
} | class ____(Iterator[str]):
def __init__(self, val: str) -> None:
self._val = val
self._idx = -1
def peek(self) -> Optional[str]:
if self._idx + 1 >= len(self._val):
return None
return self._val[self._idx + 1]
def __iter__(self) -> "PeekableIterator":
return self
def __next__(self) -> str:
rc = self.peek()
if rc is None:
raise StopIteration
self._idx += 1
return rc
def patterns_to_regex(allowed_patterns: list[str]) -> Any:
"""
pattern is glob-like, i.e. the only special sequences it has are:
- ? - matches single character
- * - matches any non-folder separator characters or no character
- ** - matches any characters or no character
Assuming that patterns are free of braces and backslashes
the only character that needs to be escaped are dot and plus
"""
rc = "("
for idx, pattern in enumerate(allowed_patterns):
if idx > 0:
rc += "|"
pattern_ = PeekableIterator(pattern)
assert not any(c in pattern for c in "{}()[]\\")
for c in pattern_:
if c == ".":
rc += "\\."
elif c == "+":
rc += "\\+"
elif c == "*":
if pattern_.peek() == "*":
next(pattern_)
rc += ".*"
else:
rc += "[^/]*"
else:
rc += c
rc += ")"
return re.compile(rc)
def _shasum(value: str) -> str:
import hashlib
m = hashlib.sha256()
m.update(value.encode("utf-8"))
return m.hexdigest()
def is_commit_hash(ref: str) -> bool:
"True if ref is hexadecimal number, else false"
try:
int(ref, 16)
except ValueError:
return False
return True
def are_ghstack_branches_in_sync(
repo: GitRepo, head_ref: str, base_ref: Optional[str] = None
) -> bool:
"""Checks that diff between base and head is the same as diff between orig and its parent"""
orig_ref = re.sub(r"/head$", "/orig", head_ref)
if base_ref is None:
base_ref = re.sub(r"/head$", "/base", head_ref)
orig_diff_sha = _shasum(repo.diff(f"{repo.remote}/{orig_ref}"))
head_diff_sha = _shasum(
repo.diff(
base_ref if is_commit_hash(base_ref) else f"{repo.remote}/{base_ref}",
f"{repo.remote}/{head_ref}",
)
)
return orig_diff_sha == head_diff_sha
def retries_decorator(
rc: Any = None, num_retries: int = 3
) -> Callable[[Callable[..., T]], Callable[..., T]]:
def decorator(f: Callable[..., T]) -> Callable[..., T]:
@wraps(f)
def wrapper(*args: list[Any], **kwargs: dict[str, Any]) -> T:
for idx in range(num_retries):
try:
return f(*args, **kwargs)
except Exception as e:
print(
f'Attempt {idx} of {num_retries} to call {f.__name__} failed with "{e}"'
)
return cast(T, rc)
return wrapper
return decorator
| PeekableIterator |
python | ijl__orjson | test/test_dataclass.py | {
"start": 1175,
"end": 1389
} | class ____:
a: InitVar[str]
b: InitVar[str]
cls_var: ClassVar[str] = "cls"
ab: str = ""
def __post_init__(self, a: str, b: str):
self._other = 1
self.ab = f"{a} {b}"
| InitDataclass |
python | realpython__materials | python-protocol/contents.py | {
"start": 232,
"end": 358
} | class ____(ContentCreator, Protocol):
videos: list[str]
def add_video(self, title: str, path: str) -> None: ...
| Vlogger |
python | lxml__lxml | src/lxml/cssselect.py | {
"start": 1444,
"end": 1766
} | class ____(LxmlTranslator, external_cssselect.HTMLTranslator):
"""
lxml extensions + HTML support.
"""
def _make_lower_case(context, s):
return s.lower()
ns = etree.FunctionNamespace('http://codespeak.net/lxml/css/')
ns.prefix = '__lxml_internal_css'
ns['lower-case'] = _make_lower_case
| LxmlHTMLTranslator |
python | pennersr__django-allauth | allauth/socialaccount/providers/xing/views.py | {
"start": 186,
"end": 353
} | class ____(OAuth):
url = "https://api.xing.com/v1/users/me.json"
def get_user_info(self):
user = self.query(self.url).json()
return user
| XingAPI |
python | sympy__sympy | sympy/core/assumptions.py | {
"start": 14075,
"end": 22761
} | class ____(FactKB):
"""A FactKB specialized for the built-in rules
This is the only kind of FactKB that Basic objects should use.
"""
def __init__(self, facts=None):
super().__init__(_assume_rules)
# save a copy of the facts dict
if not facts:
self._generator = {}
elif not isinstance(facts, FactKB):
self._generator = facts.copy()
else:
self._generator = facts.generator
if facts:
self.deduce_all_facts(facts)
def copy(self):
return self.__class__(self)
@property
def generator(self):
return self._generator.copy()
def as_property(fact):
"""Convert a fact name to the name of the corresponding property"""
return 'is_%s' % fact
def make_property(fact):
"""Create the automagic property corresponding to a fact."""
def getit(self):
try:
return self._assumptions[fact]
except KeyError:
if self._assumptions is self.default_assumptions:
self._assumptions = self.default_assumptions.copy()
return _ask(fact, self)
getit.func_name = as_property(fact)
return property(getit)
def _ask(fact, obj):
"""
Find the truth value for a property of an object.
This function is called when a request is made to see what a fact
value is.
For this we use several techniques:
First, the fact-evaluation function is tried, if it exists (for
example _eval_is_integer). Then we try related facts. For example
rational --> integer
another example is joined rule:
integer & !odd --> even
so in the latter case if we are looking at what 'even' value is,
'integer' and 'odd' facts will be asked.
In all cases, when we settle on some fact value, its implications are
deduced, and the result is cached in ._assumptions.
"""
# FactKB which is dict-like and maps facts to their known values:
assumptions = obj._assumptions
# A dict that maps facts to their handlers:
handler_map = obj._prop_handler
# This is our queue of facts to check:
facts_to_check = [fact]
facts_queued = {fact}
# Loop over the queue as it extends
for fact_i in facts_to_check:
# If fact_i has already been determined then we don't need to rerun the
# handler. There is a potential race condition for multithreaded code
# though because it's possible that fact_i was checked in another
# thread. The main logic of the loop below would potentially skip
# checking assumptions[fact] in this case so we check it once after the
# loop to be sure.
if fact_i in assumptions:
continue
# Now we call the associated handler for fact_i if it exists.
fact_i_value = None
handler_i = handler_map.get(fact_i)
if handler_i is not None:
fact_i_value = handler_i(obj)
# If we get a new value for fact_i then we should update our knowledge
# of fact_i as well as any related facts that can be inferred using the
# inference rules connecting the fact_i and any other fact values that
# are already known.
if fact_i_value is not None:
assumptions.deduce_all_facts(((fact_i, fact_i_value),))
# Usually if assumptions[fact] is now not None then that is because of
# the call to deduce_all_facts above. The handler for fact_i returned
# True or False and knowing fact_i (which is equal to fact in the first
# iteration) implies knowing a value for fact. It is also possible
# though that independent code e.g. called indirectly by the handler or
# called in another thread in a multithreaded context might have
# resulted in assumptions[fact] being set. Either way we return it.
fact_value = assumptions.get(fact)
if fact_value is not None:
return fact_value
# Extend the queue with other facts that might determine fact_i. Here
# we randomise the order of the facts that are checked. This should not
# lead to any non-determinism if all handlers are logically consistent
# with the inference rules for the facts. Non-deterministic assumptions
# queries can result from bugs in the handlers that are exposed by this
# call to shuffle. These are pushed to the back of the queue meaning
# that the inference graph is traversed in breadth-first order.
new_facts_to_check = list(_assume_rules.prereq[fact_i] - facts_queued)
shuffle(new_facts_to_check)
facts_to_check.extend(new_facts_to_check)
facts_queued.update(new_facts_to_check)
# The above loop should be able to handle everything fine in a
# single-threaded context but in multithreaded code it is possible that
# this thread skipped computing a particular fact that was computed in
# another thread (due to the continue). In that case it is possible that
# fact was inferred and is now stored in the assumptions dict but it wasn't
# checked for in the body of the loop. This is an obscure case but to make
# sure we catch it we check once here at the end of the loop.
if fact in assumptions:
return assumptions[fact]
# This query can not be answered. It's possible that e.g. another thread
# has already stored None for fact but assumptions._tell does not mind if
# we call _tell twice setting the same value. If this raises
# InconsistentAssumptions then it probably means that another thread
# attempted to compute this and got a value of True or False rather than
# None. In that case there must be a bug in at least one of the handlers.
# If the handlers are all deterministic and are consistent with the
# inference rules then the same value should be computed for fact in all
# threads.
assumptions._tell(fact, None)
return None
def _prepare_class_assumptions(cls):
"""Precompute class level assumptions and generate handlers.
This is called by Basic.__init_subclass__ each time a Basic subclass is
defined.
"""
local_defs = {}
for k in _assume_defined:
attrname = as_property(k)
v = cls.__dict__.get(attrname, '')
if isinstance(v, (bool, int, type(None))):
if v is not None:
v = bool(v)
local_defs[k] = v
defs = {}
for base in reversed(cls.__bases__):
assumptions = getattr(base, '_explicit_class_assumptions', None)
if assumptions is not None:
defs.update(assumptions)
defs.update(local_defs)
cls._explicit_class_assumptions = defs
cls.default_assumptions = StdFactKB(defs)
cls._prop_handler = {}
for k in _assume_defined:
eval_is_meth = getattr(cls, '_eval_is_%s' % k, None)
if eval_is_meth is not None:
cls._prop_handler[k] = eval_is_meth
# Put definite results directly into the class dict, for speed
for k, v in cls.default_assumptions.items():
setattr(cls, as_property(k), v)
# protection e.g. for Integer.is_even=F <- (Rational.is_integer=F)
derived_from_bases = set()
for base in cls.__bases__:
default_assumptions = getattr(base, 'default_assumptions', None)
# is an assumption-aware class
if default_assumptions is not None:
derived_from_bases.update(default_assumptions)
for fact in derived_from_bases - set(cls.default_assumptions):
pname = as_property(fact)
if pname not in cls.__dict__:
setattr(cls, pname, make_property(fact))
# Finally, add any missing automagic property (e.g. for Basic)
for fact in _assume_defined:
pname = as_property(fact)
if not hasattr(cls, pname):
setattr(cls, pname, make_property(fact))
# XXX: ManagedProperties used to be the metaclass for Basic but now Basic does
# not use a metaclass. We leave this here for backwards compatibility for now
# in case someone has been using the ManagedProperties class in downstream
# code. The reason that it might have been used is that when subclassing a
# class and wanting to use a metaclass the metaclass must be a subclass of the
# metaclass for the class that is being subclassed. Anyone wanting to subclass
# Basic and use a metaclass in their subclass would have needed to subclass
# ManagedProperties. Here ManagedProperties is not the metaclass for Basic any
# more but it should still be usable as a metaclass for Basic subclasses since
# it is a subclass of type which is now the metaclass for Basic.
| StdFactKB |
python | gevent__gevent | src/gevent/tests/test__hub.py | {
"start": 13126,
"end": 13728
} | class ____(unittest.TestCase):
def tearDown(self):
try:
del get_hub().handle_error
except AttributeError:
pass
def test_exception_in_custom_handle_error_does_not_crash(self):
def bad_handle_error(*args):
raise AttributeError
get_hub().handle_error = bad_handle_error
class MyException(Exception):
pass
def raises():
raise MyException
with self.assertRaises(MyException):
gevent.spawn(raises).get()
if __name__ == '__main__':
greentest.main()
| TestHandleError |
python | getsentry__sentry | src/sentry/migrations/0991_projectownership_json_field.py | {
"start": 244,
"end": 1760
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0990_groupowner_json_field"),
]
operations = [
migrations.SeparateDatabaseAndState(
database_operations=[mod.to_jsonb("sentry_projectownership", "schema")],
state_operations=[
migrations.AlterField(
model_name="projectownership",
name="schema",
field=models.JSONField(null=True),
),
],
)
]
| Migration |
python | huggingface__transformers | src/transformers/models/falcon_h1/modeling_falcon_h1.py | {
"start": 24186,
"end": 48881
} | class ____(nn.Module):
"""
FalconH1Mixer is identical to classic Mamba2 mixer classes but differs on two different things
- Users can pass custom intermediate_size through `config.mamba_d_ssm`
- The use of gated RMS normalization layer is optional
"""
def __init__(self, config: FalconH1Config, layer_idx: int):
super().__init__()
self.num_heads = config.mamba_n_heads
self.hidden_size = config.hidden_size
self.ssm_state_size = config.mamba_d_state
self.conv_kernel_size = config.mamba_d_conv
self.intermediate_size = (
int(config.mamba_expand * self.hidden_size) if config.mamba_d_ssm is None else config.mamba_d_ssm
)
self.layer_idx = layer_idx
self.use_conv_bias = config.mamba_conv_bias
self.activation = config.hidden_act
self.act = ACT2FN[config.hidden_act]
self.use_bias = config.mamba_proj_bias
self.layer_norm_epsilon = config.rms_norm_eps
self.groups_time_state_size = config.mamba_n_groups * self.ssm_state_size
self.n_groups = config.mamba_n_groups
self.head_dim = config.mamba_d_head
self.chunk_size = config.mamba_chunk_size
# FIXME:
self.time_step_limit = (0.0, float("inf"))
self.time_step_min = 0.001
self.time_step_max = 0.1
self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size
self.conv1d = nn.Conv1d(
in_channels=self.conv_dim,
out_channels=self.conv_dim,
bias=config.mamba_conv_bias,
kernel_size=self.conv_kernel_size,
groups=self.conv_dim,
padding=self.conv_kernel_size - 1,
)
# projection of the input hidden states
projection_size = self.intermediate_size + self.conv_dim + self.num_heads
self.in_proj = nn.Linear(
self.hidden_size,
projection_size,
bias=self.use_bias,
)
# selective projection used to make dt, B and C input dependant
# time step projection (discretization)
# instantiate once and copy inv_dt in init_weights of PretrainedModel
self.dt_bias = nn.Parameter(torch.ones(self.num_heads))
# S4D real initialization. These are not discretized!
# The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
A = torch.arange(1, self.num_heads + 1)
self.A_log = nn.Parameter(torch.log(A))
self.mamba_rms_norm = config.mamba_rms_norm
if self.mamba_rms_norm:
self.norm = FalconH1RMSNormGated(
self.intermediate_size,
eps=self.layer_norm_epsilon,
n_groups=self.n_groups,
norm_before_gate=config.mamba_norm_before_gate,
)
self.D = nn.Parameter(torch.ones(self.num_heads))
self.out_proj = nn.Linear(self.intermediate_size, config.hidden_size, bias=config.projectors_bias)
if not is_fast_path_available:
logger.warning_once(
"The fast path is not available because one of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)`"
" is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and"
" https://github.com/Dao-AILab/causal-conv1d"
)
else:
logger.warning_once("The fast path for FalconH1 will be used when running the model on a GPU")
self.zxbcdt_multipliers = config.ssm_multipliers
self.ssm_in_multiplier = config.ssm_in_multiplier
def cuda_kernels_forward(
self,
hidden_states: torch.Tensor,
cache_params: Optional[FalconHybridMambaAttentionDynamicCache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
):
# 1. Gated MLP's linear projection
hidden_states = apply_mask_to_padding_states(hidden_states, attention_mask)
# Add Multipliers
hidden_states = hidden_states * self.ssm_in_multiplier
projected_states = self.in_proj(hidden_states)
projected_states = projected_states * self.mup_vector # ADD Mup Multipliers
d_to_remove = 2 * self.intermediate_size + 2 * self.n_groups * self.ssm_state_size + self.num_heads
# Set up dimensions for reshapes later
batch_size, seq_len, _ = hidden_states.shape
groups_time_state_size = self.n_groups * self.ssm_state_size
use_precomputed_states = (
cache_params is not None
and cache_params.has_previous_state
and seq_len == 1
and cache_params.conv_states[self.layer_idx].shape[0]
== cache_params.ssm_states[self.layer_idx].shape[0]
== batch_size
and cache_position is not None
and cache_position[0] > 0
)
# getting projected states from cache if it exists
if use_precomputed_states:
d_mlp = (projected_states.squeeze(1).shape[-1] - d_to_remove) // 2
z0, x0, gate, hidden_states_B_C, dt = projected_states.squeeze(1).split(
[d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
)
# 2. Convolution sequence transformation
hidden_states_B_C = causal_conv1d_update(
hidden_states_B_C,
cache_params.conv_states[self.layer_idx],
self.conv1d.weight.squeeze(1),
self.conv1d.bias,
self.activation,
)
hidden_states, B, C = torch.split(
hidden_states_B_C,
[self.intermediate_size, groups_time_state_size, groups_time_state_size],
dim=-1,
)
# 3. SSM transformation
A = -torch.exp(self.A_log.float()) # (nheads,)
A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
dt = dt[:, :, None].expand(-1, -1, self.head_dim)
dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
D = self.D[:, None, ...].expand(-1, self.head_dim)
B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups)
C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups)
hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim)
hidden_states = selective_state_update(
cache_params.ssm_states[self.layer_idx],
hidden_states_reshaped,
dt,
A,
B,
C,
D,
z=gate.view(batch_size, self.num_heads, self.head_dim) if not self.mamba_rms_norm else None,
dt_bias=dt_bias,
dt_softplus=True,
)
hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim)
if self.mamba_rms_norm:
hidden_states = self.norm(hidden_states, gate)
if d_mlp > 0:
hidden_states = torch.cat([F.silu(z0) * x0, hidden_states], dim=-1)
# 4. Final linear projection
out = self.out_proj(hidden_states[:, None, ...])
# Fused calculations or step by step if no initialized cache is found
else:
A = -torch.exp(self.A_log.float()) # (num_heads) or (intermediate_size, state_size)
dt_limit_kwargs = {} if self.time_step_limit == (0.0, float("inf")) else {"dt_limit": self.time_step_limit}
# 2-4. Fused kernel for conv1d, SSM, and the final projection
if self.training and cache_params is None:
out = mamba_split_conv1d_scan_combined(
projected_states,
self.conv1d.weight.squeeze(1),
self.conv1d.bias,
self.dt_bias,
A,
D=self.D,
chunk_size=self.chunk_size,
seq_idx=None, # was seq_idx
activation=self.activation,
rmsnorm_weight=self.norm.weight if self.mamba_rms_norm else None,
rmsnorm_eps=self.norm.variance_epsilon if self.mamba_rms_norm else None,
outproj_weight=self.out_proj.weight,
outproj_bias=self.out_proj.bias,
headdim=self.head_dim,
ngroups=self.n_groups,
norm_before_gate=False,
return_final_states=False,
**dt_limit_kwargs,
)
else:
d_mlp = (
projected_states.shape[-1]
- 2 * self.intermediate_size
- 2 * self.n_groups * self.ssm_state_size
- self.num_heads
) // 2
if attention_mask is not None:
projected_states = projected_states * attention_mask[..., None]
_, gate, hidden_states_B_C, dt = projected_states.split(
[
2 * d_mlp,
self.intermediate_size,
self.conv_dim,
self.num_heads,
],
dim=-1,
)
if cache_params is not None:
conv_states = F.pad(
hidden_states_B_C.permute(0, 2, 1),
(self.conv_kernel_size - hidden_states_B_C.shape[-2], 0),
)
cache_params.update_conv_state(self.layer_idx, conv_states, cache_position)
time_step = nn.functional.softplus(dt + self.dt_bias)
# 1D Convolution
if causal_conv1d_fn is None or self.activation not in ["silu", "swish"]:
hidden_states_B_C = self.act(
self.conv1d(hidden_states_B_C.transpose(1, 2)).transpose(1, 2)[:, :seq_len]
) # (B, L, self.d_inner + 2 * ngroups * d_state)
else:
hidden_states_B_C = causal_conv1d_fn(
x=hidden_states_B_C.transpose(1, 2),
weight=self.conv1d.weight.squeeze(1),
bias=self.conv1d.bias,
activation=self.activation,
).transpose(1, 2)[:, :seq_len]
hidden_states, B, C = torch.split(
hidden_states_B_C,
[
self.intermediate_size,
groups_time_state_size,
groups_time_state_size,
],
dim=-1,
)
if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
# tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
dtype = hidden_states.dtype
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
# This is a hack to make sure multi-GPU inference works with HF accelerate
# see: https://github.com/Dao-AILab/flash-attention/issues/523 for more details
with torch.cuda.device(hidden_states.device):
scan_output, ssm_state = mamba_chunk_scan_combined(
hidden_states.view(batch_size, seq_len, -1, self.head_dim),
time_step,
A,
B.view(batch_size, seq_len, self.n_groups, -1),
C.view(batch_size, seq_len, self.n_groups, -1),
chunk_size=self.chunk_size,
D=self.D,
z=None,
seq_idx=None,
return_final_states=True,
**dt_limit_kwargs,
)
if ssm_state is not None and cache_params is not None:
cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
scan_output = scan_output.view(batch_size, seq_len, -1)
# Multiply "gate" branch and apply extra normalization layer
if self.mamba_rms_norm:
out = self.norm(scan_output, gate)
else:
out = scan_output * torch.nn.functional.silu(gate)
out = self.out_proj(out)
return out
# fmt: off
def torch_forward(
self,
input_states,
cache_params: Optional[FalconHybridMambaAttentionDynamicCache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
):
batch_size, seq_len, _ = input_states.shape
dtype = input_states.dtype
# 1. Gated MLP's linear projection
input_states = apply_mask_to_padding_states(input_states, attention_mask)
# Add Multipliers
input_states = input_states * self.ssm_in_multiplier
projected_states = self.in_proj(input_states)
projected_states = projected_states * self.mup_vector # ADD Mup Multipliers
gate, hidden_states_B_C, dt = projected_states.split([
self.intermediate_size, self.conv_dim, self.num_heads
], dim=-1)
use_precomputed_states = (
cache_params is not None
and cache_params.has_previous_state
and seq_len == 1
and cache_params.conv_states[self.layer_idx].shape[0]
== cache_params.ssm_states[self.layer_idx].shape[0]
== batch_size
and cache_position is not None
and cache_position[0] > 0
)
# 2. Convolution sequence transformation
if use_precomputed_states:
cache_params.conv_states[self.layer_idx] = cache_params.conv_states[self.layer_idx].roll(shifts=-1, dims=-1)
cache_params.conv_states[self.layer_idx][:, :, -1] = hidden_states_B_C[:, 0, :].to(cache_params.conv_states[self.layer_idx].device)
# We need to guarantee that anything regarding the cache is on the same device
conv_states = cache_params.conv_states[self.layer_idx].to(device=self.conv1d.weight.device)
hidden_states_B_C = torch.sum(
conv_states * self.conv1d.weight.squeeze(1), dim=-1
)
if self.use_conv_bias:
hidden_states_B_C = hidden_states_B_C + self.conv1d.bias
hidden_states_B_C = self.act(hidden_states_B_C)
else:
# Init cache
if cache_params is not None:
hidden_states_B_C_transposed = hidden_states_B_C.transpose(1, 2)
conv_states = nn.functional.pad(
hidden_states_B_C_transposed, (self.conv_kernel_size - hidden_states_B_C_transposed.shape[-1], 0)
)
cache_params.conv_states[self.layer_idx].copy_(conv_states)
hidden_states_B_C = self.act(self.conv1d(hidden_states_B_C.transpose(1, 2))[..., :seq_len].transpose(1, 2))
hidden_states_B_C = apply_mask_to_padding_states(hidden_states_B_C, attention_mask)
hidden_states, B, C = torch.split(
hidden_states_B_C,
[self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size],
dim=-1
)
# 3. SSM transformation
A = -torch.exp(self.A_log.float()) # [num_heads]
if use_precomputed_states:
# We need to guarantee that anything regarding the cache is on the same device
cache_device = cache_params.ssm_states[self.layer_idx].device
# Note: there is no need to pad parameter matrices here, as there is just one new token
# for batched generation
dt = dt[:, 0, :][:, None, ...]
dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim)
# [num_heads] -> [num_heads, head_dim]
dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim)
dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype))
dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
# [bsz, num_heads, head_dim, state_size]
dA = (torch.exp(dt[..., None] * A)).to(device=cache_device)
# Discretize B
# [bsz, n_groups * state_size] -> [bsz, n_groups, 1, state_size] ->
# -> [bsz, n_groups, group to head repetition factor, state_size] -> [bsz, num_heads, state_size]
B = B.reshape(batch_size, self.n_groups, -1)[..., None, :]
B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous()
B = B.reshape(batch_size, -1, B.shape[-1])
# [bsz, num_heads, head_dim, state_size]
dB = dt[..., None] * B[..., None, :]
# Discretize x into dB
# [bsz, intermediate_size] -> [bsz, num_heads, head_dim]
hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim)
dBx = (dB * hidden_states[..., None]).to(device=cache_device)
# State calculation
cache_params.ssm_states[self.layer_idx].copy_(
cache_params.ssm_states[self.layer_idx] * dA + dBx
)
# Subsequent output
# [bsz, n_groups * state_size] -> [bsz, num_heads, state_size]
C = C.reshape(batch_size, self.n_groups, -1)[..., None, :]
C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous()
C = C.reshape(batch_size, -1, C.shape[-1])
# [bsz, num_heads, head_dim]
ssm_states = cache_params.ssm_states[self.layer_idx].to(device=C.device, dtype=C.dtype) # Shape: [b, h, d, n]
# Reshape ssm_states to merge the first two dimensions
ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size) # Shape: [b*h, d, n]
C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1) # Shape: [b*h, n, 1]
y = torch.bmm(ssm_states_reshaped, C_reshaped)
y = y.view(batch_size, self.num_heads, self.head_dim)
# D skip connection
# [num_heads] -> [num_heads, head_dim]
D = self.D[..., None].expand(self.D.shape[0], self.head_dim)
y = (y + hidden_states * D).to(y.dtype)
# [bsz, num_heads, head_dim] -> [bsz, 1, intermediate_size]
y = y.reshape(batch_size, -1)[:, None, ...]
else:
# begin ssd naive implementation without einsums
dt = nn.functional.softplus(dt + self.dt_bias)
dt = torch.clamp(dt, self.time_step_limit[0], self.time_step_limit[1])
hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float()
B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
B = B.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads)
C = C.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads)
pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size
D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size)
# Discretize x and A
hidden_states = hidden_states * dt[..., None]
A = A.to(hidden_states.dtype) * dt
# Rearrange into blocks/chunks
hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)]
# [bsz, -1, chunk_size, num_heads] -> [bsz, num_heads, -1, chunk_size]
A = A.permute(0, 3, 1, 2)
A_cumsum = torch.cumsum(A, dim=-1)
# 1. Compute the output for each intra-chunk (diagonal blocks)
# This is the analog of a causal mask
L = torch.exp(segment_sum(A))
# Contraction of C and B to get G (attention-weights like)
G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, :, :] # shape: (b, c, l, s, h, n)
G = G_intermediate.sum(dim=-1) # shape: (b, c, l, s, h)
# Compute M, equivalent to applying attention mask to weights
M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None]
M = M_intermediate.sum(dim=-1)
# Compute Y_diag (apply to values)
Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(dim=3)
# 2. Compute the state for each intra-chunk
# (right term of low-rank factorization of off-diagonal blocks; B terms)
decay_states = torch.exp(A_cumsum[:, :, :, -1:] - A_cumsum)
B_decay = B * decay_states.permute(0, -2, -1, 1)[..., None]
states = (B_decay[..., None, :] * hidden_states[..., None]).sum(dim=2)
# 3. Compute the inter-chunk SSM recurrence; produces correct SSM states at chunk boundaries
# (middle term of factorization of off-diag blocks; A terms)
if use_precomputed_states:
previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...].to(device=states.device)
else:
previous_states = torch.zeros_like(states[:, :1])
states = torch.cat([previous_states, states], dim=1)
decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0))))
decay_chunk = decay_chunk.transpose(1, 3)
new_states = (decay_chunk[..., None, None] * states[:, :, None, ...]).sum(dim=1)
states, ssm_state = new_states[:, :-1], new_states[:, -1]
# 4. Compute state -> output conversion per chunk
# (left term of low-rank factorization of off-diagonal blocks; C terms)
state_decay_out = torch.exp(A_cumsum)
C_times_states = (C[..., None, :] * states[:, :, None, ...])
state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1)
Y_off = (C_times_states.sum(-1) * state_decay_out_permuted[..., None])
# Add output of intra-chunk and inter-chunk terms (diagonal and off-diagonal blocks)
y = Y_diag + Y_off
# [bsz, -1, self.chunk_size, num_heads, head_dim] -> [bsz, (padded) seq_len, num_heads, head_dim]
y = y.reshape(batch_size, -1, self.num_heads, self.head_dim)
y = y + D_residual
# Cutting off padded chunks
if pad_size > 0:
y = y[:, :seq_len, :, :]
y = y.reshape(batch_size, seq_len, -1)
# Init cache
if ssm_state is not None and cache_params is not None:
cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
if self.mamba_rms_norm:
scan_output = self.norm(y, gate)
else:
scan_output = y * torch.nn.functional.silu(gate)
# end ssd naive
# 4. Final linear projection
contextualized_states = self.out_proj(scan_output.to(dtype)) # [batch, seq_len, hidden_size]
return contextualized_states
# fmt: on
def forward(
self,
hidden_states,
cache_params: Optional[FalconHybridMambaAttentionDynamicCache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
):
if is_fast_path_available and "cuda" in self.in_proj.weight.device.type:
return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask)
dtype = hidden_states.dtype
if attention_mask is not None and attention_mask.shape[1] > 1 and attention_mask.shape[0] > 1:
# tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
return self.torch_forward(hidden_states, cache_params, cache_position, attention_mask)
| FalconH1Mixer |
python | jazzband__django-polymorphic | src/polymorphic/formsets/models.py | {
"start": 3763,
"end": 12826
} | class ____(BaseModelFormSet):
"""
A formset that can produce different forms depending on the object type.
Note that the 'add' feature is therefore more complex,
as all variations need ot be exposed somewhere.
When switching existing formsets to the polymorphic formset,
note that the ID field will no longer be named ''model_ptr'',
but just appear as ''id''.
"""
# Assigned by the factory
child_forms = OrderedDict()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.queryset_data = self.get_queryset()
def _construct_form(self, i, **kwargs):
"""
Create the form, depending on the model that's behind it.
"""
# BaseModelFormSet logic
if self.is_bound and i < self.initial_form_count():
pk_key = f"{self.add_prefix(i)}-{self.model._meta.pk.name}"
pk = self.data[pk_key]
pk_field = self.model._meta.pk
to_python = self._get_to_python(pk_field)
pk = to_python(pk)
kwargs["instance"] = self._existing_object(pk)
if i < self.initial_form_count() and "instance" not in kwargs:
kwargs["instance"] = self.get_queryset()[i]
if i >= self.initial_form_count() and self.initial_extra:
# Set initial values for extra forms
try:
kwargs["initial"] = self.initial_extra[i - self.initial_form_count()]
except IndexError:
pass
# BaseFormSet logic, with custom formset_class
defaults = {
"auto_id": self.auto_id,
"prefix": self.add_prefix(i),
"error_class": self.error_class,
}
if self.is_bound:
defaults["data"] = self.data
defaults["files"] = self.files
if self.initial and "initial" not in kwargs:
try:
defaults["initial"] = self.initial[i]
except IndexError:
pass
# Allow extra forms to be empty, unless they're part of
# the minimum forms.
if i >= self.initial_form_count() and i >= self.min_num:
defaults["empty_permitted"] = True
defaults["use_required_attribute"] = False
defaults.update(kwargs)
# Need to find the model that will be displayed in this form.
# Hence, peeking in the self.queryset_data beforehand.
if self.is_bound:
if "instance" in defaults:
# Object is already bound to a model, won't change the content type
model = defaults["instance"].get_real_instance_class() # allow proxy models
else:
# Extra or empty form, use the provided type.
# Note this completely tru
prefix = defaults["prefix"]
try:
ct_id = int(self.data[f"{prefix}-polymorphic_ctype"])
except (KeyError, ValueError):
raise ValidationError(
f"Formset row {prefix} has no 'polymorphic_ctype' defined!"
)
model = ContentType.objects.get_for_id(ct_id).model_class()
if model not in self.child_forms:
# Perform basic validation, as we skip the ChoiceField here.
raise UnsupportedChildType(
f"Child model type {model} is not part of the formset"
)
else:
if "instance" in defaults:
model = defaults["instance"].get_real_instance_class() # allow proxy models
elif "polymorphic_ctype" in defaults.get("initial", {}):
model = defaults["initial"]["polymorphic_ctype"].model_class()
elif i < len(self.queryset_data):
model = self.queryset_data[i].__class__
else:
# Extra forms, cycle between all types
# TODO: take the 'extra' value of each child formset into account.
total_known = len(self.queryset_data)
child_models = list(self.child_forms.keys())
model = child_models[(i - total_known) % len(child_models)]
form_class = self.get_form_class(model)
form = form_class(**defaults)
self.add_fields(form, i)
return form
def add_fields(self, form, index):
"""Add a hidden field for the content type."""
ct = ContentType.objects.get_for_model(form._meta.model, for_concrete_model=False)
choices = [(ct.pk, ct)] # Single choice, existing forms can't change the value.
form.fields["polymorphic_ctype"] = forms.TypedChoiceField(
choices=choices,
initial=ct.pk,
required=False,
widget=forms.HiddenInput,
coerce=int,
)
super().add_fields(form, index)
def get_form_class(self, model):
"""
Return the proper form class for the given model.
"""
if not self.child_forms:
raise ImproperlyConfigured(f"No 'child_forms' defined in {self.__class__.__name__}")
if not issubclass(model, PolymorphicModel):
raise TypeError(f"Expect polymorphic model type, not {model}")
try:
return self.child_forms[model]
except KeyError:
# This may happen when the query returns objects of a type that was not handled by the formset.
raise UnsupportedChildType(
f"The '{self.__class__.__name__}' found a '{model.__name__}' model in the queryset, "
f"but no form class is registered to display it."
)
def is_multipart(self):
"""
Returns True if the formset needs to be multipart, i.e. it
has FileInput. Otherwise, False.
"""
return any(f.is_multipart() for f in self.empty_forms)
@property
def media(self):
# Include the media of all form types.
# The form media includes all form widget media
media = forms.Media()
for form in self.empty_forms:
add_media(media, form.media)
return media
@cached_property
def empty_forms(self):
"""
Return all possible empty forms
"""
forms = []
for model, form_class in self.child_forms.items():
kwargs = self.get_form_kwargs(None)
form = form_class(
auto_id=self.auto_id,
prefix=self.add_prefix("__prefix__"),
empty_permitted=True,
use_required_attribute=False,
**kwargs,
)
self.add_fields(form, None)
forms.append(form)
return forms
@property
def empty_form(self):
# TODO: make an exception when can_add_base is defined?
raise RuntimeError(
"'empty_form' is not used in polymorphic formsets, use 'empty_forms' instead."
)
def polymorphic_modelformset_factory(
model,
formset_children,
formset=BasePolymorphicModelFormSet,
# Base field
# TODO: should these fields be removed in favor of creating
# the base form as a formset child too?
form=ModelForm,
fields=None,
exclude=None,
extra=1,
can_order=False,
can_delete=True,
max_num=None,
formfield_callback=None,
widgets=None,
validate_max=False,
localized_fields=None,
labels=None,
help_texts=None,
error_messages=None,
min_num=None,
validate_min=False,
field_classes=None,
child_form_kwargs=None,
):
"""
Construct the class for an polymorphic model formset.
All arguments are identical to :func:'~django.forms.models.modelformset_factory',
with the exception of the ''formset_children'' argument.
:param formset_children: A list of all child :class:'PolymorphicFormSetChild' objects
that tell the inline how to render the child model types.
:type formset_children: Iterable[PolymorphicFormSetChild]
:rtype: type
"""
kwargs = {
"model": model,
"form": form,
"formfield_callback": formfield_callback,
"formset": formset,
"extra": extra,
"can_delete": can_delete,
"can_order": can_order,
"fields": fields,
"exclude": exclude,
"min_num": min_num,
"max_num": max_num,
"widgets": widgets,
"validate_min": validate_min,
"validate_max": validate_max,
"localized_fields": localized_fields,
"labels": labels,
"help_texts": help_texts,
"error_messages": error_messages,
"field_classes": field_classes,
}
FormSet = modelformset_factory(**kwargs)
child_kwargs = {
# 'exclude': exclude,
}
if child_form_kwargs:
child_kwargs.update(child_form_kwargs)
FormSet.child_forms = polymorphic_child_forms_factory(formset_children, **child_kwargs)
return FormSet
| BasePolymorphicModelFormSet |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/module_manpath_setenv/package.py | {
"start": 217,
"end": 543
} | class ____(Package):
homepage = "http://www.spack.llnl.gov"
url = "http://www.spack.llnl.gov/module-manpath-setenv-1.0.tar.gz"
version("1.0", "0123456789abcdef0123456789abcdef")
def setup_run_environment(self, env: EnvironmentModifications) -> None:
env.set("MANPATH", "/path/to/man")
| ModuleManpathSetenv |
python | coleifer__peewee | tests/fields.py | {
"start": 10312,
"end": 10431
} | class ____(TestModel):
user = ForeignKeyField(U2, backref='tweets', on_delete='CASCADE')
content = TextField()
| T2 |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_lookup_py38.py | {
"start": 3499,
"end": 6227
} | class ____:
left: typing.Union["Node", int]
right: typing.Union["Node", int]
@pytest.mark.skipif(
settings.get_current_profile_name() == "crosshair",
reason="takes ~11 mins; datastructure explosion: https://github.com/pschanely/hypothesis-crosshair/issues/27",
)
@given(st.builds(Node))
def test_can_resolve_recursive_dataclass(val):
assert isinstance(val, Node)
def test_can_register_new_type_for_typeddicts():
sentinel = object()
with temp_registered(C, st.just(sentinel)):
assert_simple_property(st.from_type(C), lambda v: v is sentinel)
@pytest.mark.parametrize(
"lam,source",
[
((lambda a, /, b: a), "lambda a, /, b: a"),
((lambda a=None, /, b=None: a), "lambda a=None, /, b=None: a"),
],
)
def test_posonly_lambda_formatting(lam, source):
# Testing posonly lambdas, with and without default values
assert get_pretty_function_description(lam) == source
def test_does_not_convert_posonly_to_keyword():
args, kws = convert_positional_arguments(lambda x, /: None, (1,), {})
assert args
assert not kws
@given(x=st.booleans())
def test_given_works_with_keyword_only_params(*, x):
pass
def test_given_works_with_keyword_only_params_some_unbound():
@given(x=st.booleans())
def test(*, x, y):
assert y is None
test(y=None)
def test_given_works_with_positional_only_params():
@given(y=st.booleans())
def test(x, /, y):
pass
test(None)
def test_cannot_pass_strategies_by_position_if_there_are_posonly_args():
@given(st.booleans())
def test(x, /, y):
pass
with pytest.raises(InvalidArgument):
test(None)
@fails_with(InvalidArgument)
@given(st.booleans())
def test_cannot_pass_strategies_for_posonly_args(x, /):
pass
@given(y=st.booleans())
def has_posonly_args(x, /, y):
pass
@pytest.mark.xfail(
settings.get_current_profile_name() == "threading",
reason=(
"dynamic @example applications modify the shared "
"has_posonly_args.hypothesis._given_kwargs."
),
strict=False,
)
def test_example_argument_validation():
example(y=None)(has_posonly_args)(1) # Basic case is OK
with pytest.raises(
InvalidArgument,
match=re.escape(
"Cannot pass positional arguments to @example() when decorating "
"a test function which has positional-only parameters."
),
):
example(None)(has_posonly_args)(1)
with pytest.raises(
InvalidArgument,
match=re.escape(
"Inconsistent args: @given() got strategies for 'y', "
"but @example() got arguments for 'x'"
),
):
example(x=None)(has_posonly_args)(1)
| Node |
python | getsentry__sentry | tests/sentry/preprod/vcs/status_checks/size/test_templates.py | {
"start": 13684,
"end": 31268
} | class ____(StatusCheckTestBase):
"""Tests for formatting artifacts in successful/analyzed states."""
def test_multiple_artifacts_all_analyzed(self):
"""Test formatting for multiple artifacts all analyzed."""
artifacts = []
size_metrics_map = {}
for i in range(2):
artifact = PreprodArtifact.objects.create(
project=self.project,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id=f"com.example.app{i}",
build_version="1.0.0",
build_number=i + 1,
)
artifacts.append(artifact)
size_metrics = PreprodArtifactSizeMetrics.objects.create(
preprod_artifact=artifact,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED,
min_download_size=(i + 1) * 1024 * 1024, # Different sizes
max_download_size=(i + 1) * 1024 * 1024,
min_install_size=(i + 2) * 1024 * 1024,
max_install_size=(i + 2) * 1024 * 1024,
)
size_metrics_map[artifact.id] = [size_metrics]
title, subtitle, summary = format_status_check_messages(
artifacts, size_metrics_map, StatusCheckStatus.SUCCESS
)
assert title == "Size Analysis"
assert subtitle == "2 apps analyzed"
assert "1.0 MB" in summary # First artifact download size
assert "2.1 MB" in summary # Second artifact download size
assert "com.example.app0" in summary
assert "com.example.app1" in summary
def test_multiple_metric_types_per_artifact(self):
"""Test formatting with multiple metric types per artifact (main app + watch)."""
artifact = PreprodArtifact.objects.create(
project=self.project,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id="com.example.app",
build_version="1.0.0",
build_number=1,
)
main_metrics = PreprodArtifactSizeMetrics.objects.create(
preprod_artifact=artifact,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED,
min_download_size=1024 * 1024, # 1 MB
max_download_size=1024 * 1024,
min_install_size=2 * 1024 * 1024, # 2 MB
max_install_size=2 * 1024 * 1024,
)
watch_metrics = PreprodArtifactSizeMetrics.objects.create(
preprod_artifact=artifact,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.WATCH_ARTIFACT,
state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED,
min_download_size=512 * 1024, # 0.5 MB
max_download_size=512 * 1024,
min_install_size=1024 * 1024, # 1 MB
max_install_size=1024 * 1024,
)
size_metrics_map = {artifact.id: [main_metrics, watch_metrics]}
title, subtitle, summary = format_status_check_messages(
[artifact], size_metrics_map, StatusCheckStatus.SUCCESS
)
assert title == "Size Analysis"
assert subtitle == "2 apps analyzed"
# Should have two rows - main app and watch app
assert "`com.example.app`" in summary # Both main and watch show app_id
assert "-- (Watch)" in summary # Watch app label
assert "1.0 MB" in summary # Main app download
assert "524.3 KB" in summary # Watch app download
assert "2.1 MB" in summary # Main app install
# Check that both rows appear together
lines = summary.split("\n")
main_row_idx = None
watch_row_idx = None
for i, line in enumerate(lines):
if "`com.example.app`" in line and "(Watch)" not in line:
main_row_idx = i
elif "-- (Watch)" in line:
watch_row_idx = i
assert main_row_idx is not None
assert watch_row_idx is not None
assert abs(main_row_idx - watch_row_idx) == 1 # Should be adjacent rows
def test_android_dynamic_feature_metrics(self):
"""Test formatting with Android dynamic features."""
artifact = PreprodArtifact.objects.create(
project=self.project,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id="com.example.android",
build_version="1.0.0",
build_number=1,
)
main_metrics = PreprodArtifactSizeMetrics.objects.create(
preprod_artifact=artifact,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED,
min_download_size=4 * 1024 * 1024, # 4 MB
max_download_size=4 * 1024 * 1024,
min_install_size=8 * 1024 * 1024, # 8 MB
max_install_size=8 * 1024 * 1024,
)
feature_metrics = PreprodArtifactSizeMetrics.objects.create(
preprod_artifact=artifact,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.ANDROID_DYNAMIC_FEATURE,
state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED,
min_download_size=1024 * 1024, # 1 MB
max_download_size=1024 * 1024,
min_install_size=2 * 1024 * 1024, # 2 MB
max_install_size=2 * 1024 * 1024,
identifier="premium_features",
)
size_metrics_map = {artifact.id: [main_metrics, feature_metrics]}
title, subtitle, summary = format_status_check_messages(
[artifact], size_metrics_map, StatusCheckStatus.SUCCESS
)
assert title == "Size Analysis"
assert subtitle == "2 apps analyzed"
# Should have two rows - main app and dynamic feature
assert "`com.example.android`" in summary # Main app and dynamic feature both show app_id
assert "-- (Dynamic Feature)" in summary # Dynamic feature label
assert "4.2 MB" in summary # Main app download (note: rounds to 4.2 not 4.0)
assert "1.0 MB" in summary # Dynamic feature download
assert "8.4 MB" in summary # Main app install
assert "2.1 MB" in summary # Dynamic feature install (note: rounds to 2.1 not 2.0)
def test_size_changes_with_base_artifacts(self):
"""Test size change calculations when base artifacts exist for comparison."""
head_commit_comparison = CommitComparison.objects.create(
head_repo_name="test/repo",
head_sha="head_sha_123",
base_sha="base_sha_456",
provider="github",
organization_id=self.organization.id,
)
base_commit_comparison = CommitComparison.objects.create(
head_repo_name="test/repo",
head_sha="base_sha_456",
provider="github",
organization_id=self.organization.id,
)
base_artifact = PreprodArtifact.objects.create(
project=self.project,
commit_comparison=base_commit_comparison,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id="com.example.android",
build_version="1.0.2",
build_number=41,
)
PreprodArtifactSizeMetrics.objects.create(
preprod_artifact=base_artifact,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED,
min_download_size=int(3.8 * 1024 * 1024), # 3.8 MB
max_download_size=int(3.8 * 1024 * 1024),
min_install_size=int(7.9 * 1024 * 1024), # 7.9 MB
max_install_size=int(7.9 * 1024 * 1024),
)
head_artifact = PreprodArtifact.objects.create(
project=self.project,
commit_comparison=head_commit_comparison,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id="com.example.android",
build_version="1.0.3",
build_number=42,
)
head_size_metrics = PreprodArtifactSizeMetrics.objects.create(
preprod_artifact=head_artifact,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED,
min_download_size=4 * 1024 * 1024, # 4.0 MB
max_download_size=4 * 1024 * 1024,
min_install_size=int(8.2 * 1024 * 1024), # 8.2 MB
max_install_size=int(8.2 * 1024 * 1024),
)
size_metrics_map = {head_artifact.id: [head_size_metrics]}
title, subtitle, summary = format_status_check_messages(
[head_artifact], size_metrics_map, StatusCheckStatus.SUCCESS
)
assert title == "Size Analysis"
assert subtitle == "1 app analyzed"
# Verify that size changes are calculated and displayed
assert "4.2 MB" in summary # Current download size
assert "8.6 MB" in summary # Current install size
# Verify that changes are shown (4.2MB - 4.0MB = 209.7KB, 8.6MB - 8.3MB = 314.6KB)
assert "+209.7 KB" in summary # Download change
assert "+314.6 KB" in summary # Install change
# Verify the table structure includes the Change columns
assert "Change" in summary
assert "com.example.android" in summary
assert "1.0.3 (42)" in summary
def test_size_changes_no_base_artifacts(self):
"""Test that N/A is shown when no base artifacts exist for comparison."""
artifact = PreprodArtifact.objects.create(
project=self.project,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id="com.example.app",
build_version="1.0.0",
build_number=1,
)
size_metrics = PreprodArtifactSizeMetrics.objects.create(
preprod_artifact=artifact,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED,
min_download_size=1024 * 1024, # 1 MB
max_download_size=1024 * 1024,
min_install_size=2 * 1024 * 1024, # 2 MB
max_install_size=2 * 1024 * 1024,
)
size_metrics_map = {artifact.id: [size_metrics]}
title, subtitle, summary = format_status_check_messages(
[artifact], size_metrics_map, StatusCheckStatus.SUCCESS
)
assert title == "Size Analysis"
assert subtitle == "1 app analyzed"
assert "1.0 MB" in summary
assert "2.1 MB" in summary
# Should show N/A for changes when no base exists
lines = summary.split("\n")
data_line = next(line for line in lines if "com.example.app" in line)
assert "N/A" in data_line # Change columns show N/A
def test_size_changes_with_different_artifact_types(self):
"""Test that size changes only compare the same artifact types."""
head_commit_comparison = CommitComparison.objects.create(
head_repo_name="test/repo",
head_sha="head_sha_789",
base_sha="base_sha_012",
provider="github",
organization_id=self.organization.id,
)
base_commit_comparison = CommitComparison.objects.create(
head_repo_name="test/repo",
head_sha="base_sha_012",
provider="github",
organization_id=self.organization.id,
)
# Create base artifact with only MAIN_ARTIFACT metrics
base_artifact = PreprodArtifact.objects.create(
project=self.project,
commit_comparison=base_commit_comparison,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id="com.example.ios",
build_version="1.0.1",
build_number=10,
)
PreprodArtifactSizeMetrics.objects.create(
preprod_artifact=base_artifact,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED,
min_download_size=int(2.8 * 1024 * 1024), # 2.8 MB
max_download_size=int(2.8 * 1024 * 1024),
min_install_size=int(6.5 * 1024 * 1024), # 6.5 MB
max_install_size=int(6.5 * 1024 * 1024),
)
# Create head artifact with MAIN_ARTIFACT and WATCH_ARTIFACT
head_artifact = PreprodArtifact.objects.create(
project=self.project,
commit_comparison=head_commit_comparison,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id="com.example.ios",
build_version="1.0.2",
build_number=11,
)
head_main_metrics = PreprodArtifactSizeMetrics.objects.create(
preprod_artifact=head_artifact,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED,
min_download_size=3 * 1024 * 1024, # 3.0 MB
max_download_size=3 * 1024 * 1024,
min_install_size=int(6.8 * 1024 * 1024), # 6.8 MB
max_install_size=int(6.8 * 1024 * 1024),
)
head_watch_metrics = PreprodArtifactSizeMetrics.objects.create(
preprod_artifact=head_artifact,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.WATCH_ARTIFACT,
state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED,
min_download_size=512 * 1024, # 512 KB
max_download_size=512 * 1024,
min_install_size=1024 * 1024, # 1 MB
max_install_size=1024 * 1024,
)
size_metrics_map = {head_artifact.id: [head_main_metrics, head_watch_metrics]}
_, _, summary = format_status_check_messages(
[head_artifact], size_metrics_map, StatusCheckStatus.SUCCESS
)
# Main artifact should show changes (has matching base)
assert "+209.7 KB" in summary # 3.1MB - 2.9MB = 209.7KB
assert "+314.6 KB" in summary # 7.1MB - 6.8MB = 314.6KB
# Watch artifact should show N/A (no matching base watch metrics)
lines = summary.split("\n")
watch_line = next(line for line in lines if "(Watch)" in line)
# Count N/A occurrences in the watch line - should be 3 (change columns + approval)
na_count = watch_line.count("N/A")
assert na_count >= 2 # At least 2 N/A for the change columns
def test_android_app_shows_uncompressed_label(self):
"""Test that Android apps show 'Uncompressed' instead of 'Install' in column header."""
# Test Android app
android_artifact = PreprodArtifact.objects.create(
project=self.project,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id="com.example.android",
build_version="1.0.0",
build_number=1,
artifact_type=PreprodArtifact.ArtifactType.AAB,
)
android_size_metrics = PreprodArtifactSizeMetrics.objects.create(
preprod_artifact=android_artifact,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED,
min_download_size=1024 * 1024, # 1 MB
max_download_size=1024 * 1024,
min_install_size=2 * 1024 * 1024, # 2 MB
max_install_size=2 * 1024 * 1024,
)
android_size_metrics_map = {android_artifact.id: [android_size_metrics]}
title, subtitle, android_summary = format_status_check_messages(
[android_artifact], android_size_metrics_map, StatusCheckStatus.SUCCESS
)
# Android app should show "Uncompressed" instead of "Install"
assert "Uncompressed" in android_summary
assert "Install" not in android_summary or android_summary.count("Install") == 0
# Test iOS app for comparison
ios_artifact = PreprodArtifact.objects.create(
project=self.project,
state=PreprodArtifact.ArtifactState.PROCESSED,
app_id="com.example.ios",
build_version="1.0.0",
build_number=1,
artifact_type=PreprodArtifact.ArtifactType.XCARCHIVE,
)
ios_size_metrics = PreprodArtifactSizeMetrics.objects.create(
preprod_artifact=ios_artifact,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
state=PreprodArtifactSizeMetrics.SizeAnalysisState.COMPLETED,
min_download_size=1024 * 1024, # 1 MB
max_download_size=1024 * 1024,
min_install_size=2 * 1024 * 1024, # 2 MB
max_install_size=2 * 1024 * 1024,
)
ios_size_metrics_map = {ios_artifact.id: [ios_size_metrics]}
title, subtitle, ios_summary = format_status_check_messages(
[ios_artifact], ios_size_metrics_map, StatusCheckStatus.SUCCESS
)
# iOS app should show "Install" not "Uncompressed"
assert "Install" in ios_summary
assert "Uncompressed" not in ios_summary
@region_silo_test
| SuccessStateFormattingTest |
python | getsentry__sentry | src/sentry/interfaces/user.py | {
"start": 237,
"end": 467
} | class ____(TypedDict, total=False):
id: str | None
email: str | None
username: str | None
ip_address: str | None
name: str | None
geo: dict[str, str] | None
data: dict[str, Any] | None
| EventUserApiContext |
python | pandas-dev__pandas | asv_bench/benchmarks/plotting.py | {
"start": 2323,
"end": 2557
} | class ____:
def setup(self):
N = 500
M = 10
self.df = DataFrame(np.random.randn(N, M))
self.df["Name"] = ["A"] * N
def time_plot_andrews_curves(self):
andrews_curves(self.df, "Name")
| Misc |
python | jd__tenacity | tenacity/stop.py | {
"start": 1797,
"end": 1965
} | class ____(stop_base):
"""Never stop."""
def __call__(self, retry_state: "RetryCallState") -> bool:
return False
stop_never = _stop_never()
| _stop_never |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess4.py | {
"start": 1817,
"end": 1954
} | class ____:
type TypeAlias1 = Callable[[], None]
def __init__(self):
self.someVarNoArgs: G.TypeAlias1
g_ta: G.TypeAlias1
| G |
python | huggingface__transformers | src/transformers/models/llama4/modeling_llama4.py | {
"start": 36216,
"end": 36832
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = nn.GELU() # ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size, bias=True)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size, bias=True)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
| Llama4VisionMLP |
python | jmcnamara__XlsxWriter | xlsxwriter/test/styles/test_styles07.py | {
"start": 380,
"end": 4766
} | class ____(unittest.TestCase):
"""
Test assembling a complete Styles file.
"""
def test_assemble_xml_file(self):
"""Test for simple fills."""
self.maxDiff = None
fh = StringIO()
style = Styles()
style._set_filehandle(fh)
workbook = Workbook()
workbook.add_format({"pattern": 1, "bg_color": "red"})
workbook.add_format({"pattern": 11, "bg_color": "red"})
workbook.add_format({"pattern": 11, "bg_color": "red", "fg_color": "yellow"})
workbook.add_format({"pattern": 1, "bg_color": "red", "fg_color": "red"})
workbook._set_default_xf_indices()
workbook._prepare_format_properties()
style._set_style_properties(
[
workbook.xf_formats,
workbook.palette,
workbook.font_count,
workbook.num_formats,
workbook.border_count,
workbook.fill_count,
workbook.custom_colors,
workbook.dxf_formats,
workbook.has_comments,
]
)
style._assemble_xml_file()
workbook.fileclosed = 1
exp = _xml_to_list(
"""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">
<fonts count="1">
<font>
<sz val="11"/>
<color theme="1"/>
<name val="Calibri"/>
<family val="2"/>
<scheme val="minor"/>
</font>
</fonts>
<fills count="6">
<fill>
<patternFill patternType="none"/>
</fill>
<fill>
<patternFill patternType="gray125"/>
</fill>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFFF0000"/>
<bgColor indexed="64"/>
</patternFill>
</fill>
<fill>
<patternFill patternType="lightHorizontal">
<bgColor rgb="FFFF0000"/>
</patternFill>
</fill>
<fill>
<patternFill patternType="lightHorizontal">
<fgColor rgb="FFFFFF00"/>
<bgColor rgb="FFFF0000"/>
</patternFill>
</fill>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFFF0000"/>
<bgColor rgb="FFFF0000"/>
</patternFill>
</fill>
</fills>
<borders count="1">
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
</borders>
<cellStyleXfs count="1">
<xf numFmtId="0" fontId="0" fillId="0" borderId="0"/>
</cellStyleXfs>
<cellXfs count="5">
<xf numFmtId="0" fontId="0" fillId="0" borderId="0" xfId="0"/>
<xf numFmtId="0" fontId="0" fillId="2" borderId="0" xfId="0" applyFill="1"/>
<xf numFmtId="0" fontId="0" fillId="3" borderId="0" xfId="0" applyFill="1"/>
<xf numFmtId="0" fontId="0" fillId="4" borderId="0" xfId="0" applyFill="1"/>
<xf numFmtId="0" fontId="0" fillId="5" borderId="0" xfId="0" applyFill="1"/>
</cellXfs>
<cellStyles count="1">
<cellStyle name="Normal" xfId="0" builtinId="0"/>
</cellStyles>
<dxfs count="0"/>
<tableStyles count="0" defaultTableStyle="TableStyleMedium9" defaultPivotStyle="PivotStyleLight16"/>
</styleSheet>
"""
)
got = _xml_to_list(fh.getvalue())
self.assertEqual(exp, got)
| TestAssembleStyles |
python | allegroai__clearml | clearml/utilities/gpu/pynvml.py | {
"start": 42970,
"end": 43119
} | class ____(Structure):
_fields_ = [("clkApiDomain", c_uint),
("clkDomainFaultMask", c_uint)
]
| nvmlClkMonFaultInfo_t |
python | celery__celery | t/unit/utils/test_platforms.py | {
"start": 12022,
"end": 13060
} | class ____:
@patch('pwd.getpwuid')
@patch('os.initgroups', create=True)
def test_with_initgroups(self, initgroups_, getpwuid):
getpwuid.return_value = ['user']
initgroups(5001, 50001)
initgroups_.assert_called_with('user', 50001)
@patch('celery.platforms.setgroups')
@patch('grp.getgrall')
@patch('pwd.getpwuid')
def test_without_initgroups(self, getpwuid, getgrall, setgroups):
prev = getattr(os, 'initgroups', None)
try:
delattr(os, 'initgroups')
except AttributeError:
pass
try:
getpwuid.return_value = ['user']
class grent:
gr_mem = ['user']
def __init__(self, gid):
self.gr_gid = gid
getgrall.return_value = [grent(1), grent(2), grent(3)]
initgroups(5001, 50001)
setgroups.assert_called_with([1, 2, 3])
finally:
if prev:
os.initgroups = prev
@t.skip.if_win32
| test_initgroups |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/test_validators.py | {
"start": 2563,
"end": 3150
} | class ____(TestCase):
def test_validate_adds_creator_and_type(self) -> None:
validator = MockDataSourceValidator(
data={
"field1": "test",
"field2": 123,
}
)
assert validator.is_valid()
assert "_creator" in validator.validated_data
assert isinstance(validator.validated_data["_creator"], DataSourceCreator)
assert validator.validated_data["data_source_type"] == data_source_type_registry.get_key(
QuerySubscriptionDataSourceHandler
)
| TestBaseDataSourceValidator |
python | pyinstaller__pyinstaller | PyInstaller/depend/imphookapi.py | {
"start": 8031,
"end": 10877
} | class ____:
"""
Metadata communicating changes made by the current **pre-find module path hook** (i.e., hook run immediately
_before_ a call to `ModuleGraph._find_module_path()` finding the hooked module's absolute path) back to PyInstaller.
Pre-find module path hooks _must_ define a `pre_find_module_path()` function accepting an instance of this class,
whose attributes describe the subsequent `ModuleGraph._find_module_path()` call to be performed.
Pre-find module path hooks are typically used to change the absolute path from which a module will be
subsequently imported and thus frozen into the executable. To do so, hooks may overwrite the default
`search_dirs` list of the absolute paths of all directories to be searched for that module: e.g.,
def pre_find_module_path(api):
api.search_dirs = ['/the/one/true/package/providing/this/module']
Each pre-find module path hook is run _only_ on the first call to `ModuleGraph._find_module_path()` for the
corresponding module.
Attributes
----------
The following attributes are **mutable** (i.e., modifiable). All changes to these attributes will be immediately
respected by PyInstaller:
search_dirs : list
List of the absolute paths of all directories to be searched for this module (in order). Searching will halt
at the first directory containing this module.
Attributes (Immutable)
----------
The following attributes are **immutable** (i.e., read-only). For safety, any attempts to change these attributes
_will_ result in a raised exception:
module_name : str
Fully-qualified name of this module.
module_graph : PyiModuleGraph
Current module graph. For efficiency, this attribute is technically mutable. To preserve graph integrity,
this attribute should nonetheless _never_ be modified. While read-only `PyiModuleGraph` methods (e.g.,
`findNode()`) are safely callable from within pre-find module path hooks, methods modifying the graph are
_not_. If graph modifications are required, consider an alternative type of hook (e.g., pre-import module
hooks).
"""
def __init__(
self,
module_graph,
module_name,
search_dirs,
):
# Mutable attributes.
self.search_dirs = search_dirs
# Immutable attributes.
self._module_graph = module_graph
self._module_name = module_name
# Immutable properties. No corresponding setters are defined.
@property
def module_graph(self):
"""
Current module graph.
"""
return self._module_graph
@property
def module_name(self):
"""
Fully-qualified name of this module.
"""
return self._module_name
| PreFindModulePathAPI |
python | scrapy__scrapy | tests/test_exporters.py | {
"start": 16222,
"end": 16361
} | class ____(TestXmlItemExporter):
item_class = MyDataClass
custom_field_item_class = CustomFieldDataclass
| TestXmlItemExporterDataclass |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 861052,
"end": 861554
} | class ____(sgqlc.types.Type):
"""Represents a sort by field and direction."""
__schema__ = github_schema
__field_names__ = ("direction", "field")
direction = sgqlc.types.Field(sgqlc.types.non_null(OrderDirection), graphql_name="direction")
"""The direction of the sorting. Possible values are ASC and DESC."""
field = sgqlc.types.Field(sgqlc.types.non_null("ProjectV2FieldConfiguration"), graphql_name="field")
"""The field by which items are sorted."""
| ProjectV2SortByField |
python | doocs__leetcode | solution/1100-1199/1190.Reverse Substrings Between Each Pair of Parentheses/Solution.py | {
"start": 0,
"end": 360
} | class ____:
def reverseParentheses(self, s: str) -> str:
stk = []
for c in s:
if c == ")":
t = []
while stk[-1] != "(":
t.append(stk.pop())
stk.pop()
stk.extend(t)
else:
stk.append(c)
return "".join(stk)
| Solution |
python | ray-project__ray | release/benchmarks/distributed/many_nodes_tests/dashboard_test.py | {
"start": 794,
"end": 1234
} | class ____(BaseModel):
success: bool
# endpoints -> list of latencies
result: Dict[str, List[float]]
# Dashboard memory usage in MB.
memory_mb: Optional[float]
# Currently every endpoint is GET endpoints.
endpoints = [
"/logical/actors",
"/nodes?view=summary",
"/",
"/api/cluster_status",
"/events",
"/api/jobs/",
"/api/v0/logs",
"/api/prometheus_health",
]
@ray.remote(num_cpus=0)
| Result |
python | django__django | tests/backends/models.py | {
"start": 4122,
"end": 4189
} | class ____(models.Model):
raw_data = models.BinaryField()
| RawData |
python | kamyu104__LeetCode-Solutions | Python/construct-2d-grid-matching-graph-layout.py | {
"start": 60,
"end": 1236
} | class ____(object):
def constructGridLayout(self, n, edges):
"""
:type n: int
:type edges: List[List[int]]
:rtype: List[List[int]]
"""
def bfs(u):
dist = [0]*n
dist[u] = 1
q = [u]
while q:
new_q = []
for u in q:
for v in adj[u]:
if dist[v]:
continue
dist[v] = dist[u]+1
new_q.append(v)
q = new_q
return dist
adj = [[] for _ in xrange(n)]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
mn = min(len(x) for x in adj)
corners = [u for u in xrange(n) if len(adj[u]) == mn]
dist1 = bfs(corners[0])
corners.sort(key=lambda x: dist1[x])
dist2 = bfs(corners[1])
c = dist1[corners[1]]
r = n//c
result = [[0]*c for _ in range(r)]
for u in xrange(n):
i = ((dist1[u]+dist2[u])-(1+c))//2
j = (dist1[u]-1)-i
result[i][j] = u
return result
| Solution |
python | apache__airflow | providers/google/src/airflow/providers/google/common/hooks/base_google.py | {
"start": 4700,
"end": 6083
} | class ____(tenacity.retry_if_exception):
"""Retries if there was an exception for refreshing credentials."""
def __init__(self):
super().__init__(is_refresh_credentials_exception)
# A fake project_id to use in functions decorated by fallback_to_default_project_id
# This allows the 'project_id' argument to be of type str instead of str | None,
# making it easier to type hint the function body without dealing with the None
# case that can never happen at runtime.
PROVIDE_PROJECT_ID: str = cast("str", None)
T = TypeVar("T", bound=Callable)
RT = TypeVar("RT")
# Sentinel value to distinguish "parameter not provided" from "parameter explicitly set to a value"
_UNSET = object()
def get_field(extras: dict, field_name: str) -> str | None:
"""Get field from extra, first checking short name, then for backcompat we check for prefixed name."""
if field_name.startswith("extra__"):
raise ValueError(
f"Got prefixed name {field_name}; please remove the 'extra__google_cloud_platform__' prefix "
"when using this method."
)
if field_name in extras:
value = extras[field_name]
return None if value == "" else value
prefixed_name = f"extra__google_cloud_platform__{field_name}"
value = extras.get(prefixed_name)
return None if value == "" else value
| retry_if_temporary_refresh_credentials |
python | FactoryBoy__factory_boy | tests/utils.py | {
"start": 1449,
"end": 2057
} | class ____(MultiModulePatcher):
def __init__(self, target_dt, *target_modules, **kwargs):
self.target_dt = target_dt
super().__init__(*target_modules, **kwargs)
def _build_patcher(self, target_module):
module_datetime = getattr(target_module, 'datetime')
return alter_time.mock_datetime_now(self.target_dt, module_datetime)
def evaluate_declaration(declaration, force_sequence=None):
kwargs = {'attr': declaration}
if force_sequence is not None:
kwargs['__sequence'] = force_sequence
return factory.build(dict, **kwargs)['attr']
| mocked_datetime_now |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.